2024-11-07 18:27:12,711 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 2024-11-07 18:27:12,728 main DEBUG Took 0.015406 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-07 18:27:12,729 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-07 18:27:12,729 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-07 18:27:12,730 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-07 18:27:12,731 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,738 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-07 18:27:12,750 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,751 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,752 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,752 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,753 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,753 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,754 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,755 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,756 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,757 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-07 18:27:12,758 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,758 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,758 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,759 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,759 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,760 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,760 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,760 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,761 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-07 18:27:12,762 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,762 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-07 18:27:12,763 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-07 18:27:12,765 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-07 18:27:12,766 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-07 18:27:12,767 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
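The DEBUG entries above show the PropertiesConfiguration assembling per-logger levels for the test run (for example org.apache.zookeeper=ERROR, org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG, with the root logger at INFO routed to the Console appender). A minimal sketch of applying those same levels programmatically through the Log4j2 Configurator API that ships with 2.17.2; the class name and main method are illustrative only and are not part of the test:

    // Illustrative sketch only: mirrors the logger levels listed in the DEBUG output above.
    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class LoggerLevelsSketch {
        public static void main(String[] args) {
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);    // ZooKeeper quieted to ERROR
            Configurator.setLevel("org.apache.hadoop", Level.WARN);        // Hadoop at WARN
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG); // HBase at DEBUG
            Configurator.setRootLevel(Level.INFO);                         // root logger -> INFO (Console appender)
        }
    }
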
2024-11-07 18:27:12,768 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-07 18:27:12,768 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-07 18:27:12,776 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-07 18:27:12,779 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-07 18:27:12,781 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-07 18:27:12,781 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-07 18:27:12,781 main DEBUG createAppenders(={Console}) 2024-11-07 18:27:12,782 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 initialized 2024-11-07 18:27:12,783 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 2024-11-07 18:27:12,783 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7bd7d6d6 OK. 2024-11-07 18:27:12,783 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-07 18:27:12,784 main DEBUG OutputStream closed 2024-11-07 18:27:12,784 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-07 18:27:12,784 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-07 18:27:12,785 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@25fb8912 OK 2024-11-07 18:27:12,855 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-07 18:27:12,857 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-07 18:27:12,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-07 18:27:12,859 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-07 18:27:12,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-07 18:27:12,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-07 18:27:12,860 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-07 18:27:12,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-07 18:27:12,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-07 18:27:12,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-07 18:27:12,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-07 18:27:12,862 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-07 18:27:12,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-07 18:27:12,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-07 18:27:12,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-07 18:27:12,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-07 18:27:12,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-07 18:27:12,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-07 18:27:12,866 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07 18:27:12,867 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@64a40280) with optional ClassLoader: null 2024-11-07 18:27:12,867 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-07 18:27:12,868 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@64a40280] started OK. 2024-11-07T18:27:12,880 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.master.balancer.TestBalancerDecision timeout: 13 mins 2024-11-07 18:27:12,883 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-07 18:27:12,883 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-07T18:27:13,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-07T18:27:13,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
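The StochasticLoadBalancer entry above loads maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000 and the listed cost functions, and the "skipping load balancing" messages that follow point at hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0) as the knob to lower for more aggressive balancing. A minimal sketch, assuming a standard HBaseConfiguration and using only the property name quoted in those log messages, of how that threshold might be lowered; the value chosen here is illustrative and not the test's actual setting:

    // Illustrative sketch only: lowering the balancing threshold named in the log messages below.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Default is 1.0; the log shows balancing skipped because weighted imbalance 0.0 <= 1.0.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(
                conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }
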
2024-11-07T18:27:13,536 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=12, OpenFileDescriptor=286, MaxFileDescriptor=1048576, SystemLoadAverage=84, ProcessCount=11, AvailableMemoryMB=2644 2024-11-07T18:27:13,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-07T18:27:13,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T18:27:13,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-07T18:27:13,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=true, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-07T18:27:13,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:13,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:13,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:13,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:13,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:13,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:13,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:13,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:13,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv244163670=0, srv779954682=1} racks are {rack=0} 2024-11-07T18:27:13,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,586 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv244163670=0, srv779954682=1} racks are {rack=0} 2024-11-07T18:27:13,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1406022542=0, srv91052572=1} racks are {rack=0} 2024-11-07T18:27:13,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,598 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,598 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,598 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1406022542=0, srv91052572=1} racks are {rack=0} 2024-11-07T18:27:13,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,599 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,600 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,600 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,601 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,601 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv562451637=0, srv959065496=1} racks are {rack=0} 2024-11-07T18:27:13,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv562451637=0, srv959065496=1} racks are {rack=0} 2024-11-07T18:27:13,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:13,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv466823539=1, srv1328418932=0} racks are {rack=0} 2024-11-07T18:27:13,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv466823539=1, srv1328418932=0} racks are {rack=0} 2024-11-07T18:27:13,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv466823539=1, srv1328418932=0} racks are {rack=0} 2024-11-07T18:27:13,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:13,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv748182471=1, srv135916225=0} racks are {rack=0} 2024-11-07T18:27:13,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:13,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv748182471=1, srv135916225=0} racks are {rack=0} 2024-11-07T18:27:13,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv748182471=1, srv135916225=0} racks are {rack=0} 2024-11-07T18:27:13,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv748182471=1, srv135916225=0} racks are {rack=0} 2024-11-07T18:27:13,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1021797563=0, srv164972240=1} racks are {rack=0} 2024-11-07T18:27:13,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1354634862=0, srv1534572756=1} racks are {rack=0} 2024-11-07T18:27:13,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:13,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:13,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:13,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:13,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:13,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:13,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:13,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:13,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:13,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:13,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2091920326=1, srv1525756978=0} racks are {rack=0} 2024-11-07T18:27:13,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-07T18:27:13,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
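The BalancerClusterState lines are the balancer building integer indices over the cluster topology: each server name gets a server index, each server maps to a host index, and each host maps to a rack index (two hosts sharing one rack here). A small sketch of that bookkeeping follows, under the assumption that it is plain map/array indexing; the real BalancerClusterState class tracks far more state than this.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch of the topology indexing logged as "server 0 is on host 0",
// "Number of tables=1, number of hosts=2, number of racks=1". Not the actual HBase class.
public class ClusterTopologySketch {
    public static void main(String[] args) {
        // One regionserver per host, both hosts in the same rack, matching
        // "Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0}".
        List<String> servers = List.of("srv1000197244", "srv751934877");
        String rackName = "rack";

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];

        for (int s = 0; s < servers.size(); s++) {
            String host = servers.get(s);
            if (!hostIndex.containsKey(host)) {
                hostIndex.put(host, hostIndex.size());
            }
            if (!rackIndex.containsKey(rackName)) {
                rackIndex.put(rackName, rackIndex.size());
            }
            serverToHost[s] = hostIndex.get(host);
            serverToRack[s] = rackIndex.get(rackName);
            System.out.println("server " + s + " is on host " + serverToHost[s]
                + " and rack " + serverToRack[s]);
        }
        System.out.println("number of hosts=" + hostIndex.size()
            + ", number of racks=" + rackIndex.size());
    }
}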
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-07T18:27:13,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-07T18:27:13,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-07T18:27:13,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-07T18:27:13,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-07T18:27:13,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-07T18:27:13,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-07T18:27:13,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-07T18:27:13,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-07T18:27:13,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-07T18:27:13,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-07T18:27:13,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-07T18:27:13,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-07T18:27:13,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-07T18:27:13,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-07T18:27:13,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-07T18:27:13,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-07T18:27:13,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-07T18:27:13,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-07T18:27:13,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-07T18:27:13,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-07T18:27:13,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-07T18:27:13,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-07T18:27:13,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-07T18:27:13,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-07T18:27:13,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-07T18:27:13,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-07T18:27:13,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-07T18:27:13,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-07T18:27:13,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-07T18:27:13,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-07T18:27:13,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-07T18:27:13,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-07T18:27:13,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-07T18:27:13,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-07T18:27:13,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-07T18:27:13,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-07T18:27:13,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-07T18:27:13,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-07T18:27:13,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-07T18:27:13,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-07T18:27:13,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-07T18:27:13,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-07T18:27:13,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-07T18:27:13,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-07T18:27:13,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-07T18:27:13,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-07T18:27:13,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-07T18:27:13,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-07T18:27:13,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-07T18:27:13,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-07T18:27:13,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-07T18:27:13,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-07T18:27:13,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-07T18:27:13,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-07T18:27:13,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-07T18:27:13,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-07T18:27:13,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-07T18:27:13,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-07T18:27:13,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-07T18:27:13,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-07T18:27:13,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-07T18:27:13,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-07T18:27:13,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-07T18:27:13,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-07T18:27:13,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-07T18:27:13,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-07T18:27:13,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-07T18:27:13,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-07T18:27:13,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-07T18:27:13,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-07T18:27:13,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-07T18:27:13,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-07T18:27:13,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-07T18:27:13,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-07T18:27:13,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-07T18:27:13,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-07T18:27:13,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-07T18:27:13,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-07T18:27:13,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-07T18:27:13,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-07T18:27:13,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-07T18:27:13,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-07T18:27:13,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-07T18:27:13,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-07T18:27:13,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-07T18:27:13,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-07T18:27:13,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-07T18:27:13,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-07T18:27:13,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-07T18:27:13,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-07T18:27:13,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-07T18:27:13,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-07T18:27:13,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-07T18:27:13,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-07T18:27:13,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-07T18:27:13,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-07T18:27:13,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-07T18:27:13,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-07T18:27:13,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-07T18:27:13,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-07T18:27:13,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-07T18:27:13,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-07T18:27:13,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-07T18:27:13,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-07T18:27:13,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-07T18:27:13,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-07T18:27:13,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-07T18:27:13,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-07T18:27:13,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-07T18:27:13,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-07T18:27:13,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-07T18:27:13,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-07T18:27:13,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:13,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-07T18:27:13,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-07T18:27:14,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-07T18:27:14,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-07T18:27:14,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-07T18:27:14,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-07T18:27:14,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-07T18:27:14,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-07T18:27:14,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-07T18:27:14,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-07T18:27:14,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-07T18:27:14,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-07T18:27:14,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-07T18:27:14,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-07T18:27:14,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-07T18:27:14,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-07T18:27:14,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,038 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,038 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,039 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,039 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,040 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-07T18:27:14,040 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,041 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,042 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-07T18:27:14,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,043 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,044 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,045 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-07T18:27:14,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,046 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,046 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,047 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,047 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,048 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,048 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,049 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,049 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,050 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,050 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,051 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,051 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,052 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,053 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,054 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-07T18:27:14,054 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,055 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-07T18:27:14,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,056 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,057 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,058 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,058 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,059 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,060 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,060 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,061 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-07T18:27:14,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,062 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,063 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,064 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-07T18:27:14,064 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-07T18:27:14,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-07T18:27:14,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-07T18:27:14,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-07T18:27:14,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-07T18:27:14,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-07T18:27:14,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-07T18:27:14,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-07T18:27:14,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-07T18:27:14,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-07T18:27:14,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-07T18:27:14,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-07T18:27:14,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-07T18:27:14,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-07T18:27:14,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-07T18:27:14,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:14,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-07T18:27:14,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-07T18:27:14,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-07T18:27:14,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-07T18:27:14,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-07T18:27:14,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:14,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:14,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-07T18:27:14,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-07T18:27:14,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-07T18:27:14,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-07T18:27:14,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-07T18:27:14,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-07T18:27:14,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-07T18:27:14,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-07T18:27:14,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-07T18:27:14,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-07T18:27:14,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-07T18:27:14,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-07T18:27:14,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-07T18:27:14,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-07T18:27:14,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-07T18:27:14,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-07T18:27:14,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:14,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-07T18:27:14,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-07T18:27:14,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-07T18:27:14,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-07T18:27:14,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:14,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:14,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-07T18:27:14,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-07T18:27:14,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-07T18:27:14,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-07T18:27:14,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:14,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-07T18:27:14,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-07T18:27:14,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:14,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-07T18:27:14,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-07T18:27:14,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-07T18:27:14,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-07T18:27:14,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:14,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-07T18:27:14,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-07T18:27:14,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-07T18:27:14,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-07T18:27:14,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:14,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-07T18:27:14,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-07T18:27:14,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:14,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:14,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:14,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:14,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:14,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:14,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-07T18:27:14,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-07T18:27:14,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-07T18:27:14,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-07T18:27:14,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-07T18:27:14,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-07T18:27:14,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-07T18:27:14,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-07T18:27:14,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-07T18:27:14,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-07T18:27:14,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv751934877=1, srv1000197244=0} racks are {rack=0} 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:14,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:14,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:14,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:14,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:14,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:14,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:14,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:14,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:14,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1902375425=1, srv153691653=0} racks are {rack=0} 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv718619830=1, srv1474135348=0, srv879260538=2} racks are {rack=0} 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv718619830=1, srv1474135348=0, srv879260538=2} racks are {rack=0} 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv795533538=2, srv287321374=1, srv1652780667=0} racks are {rack=0} 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv795533538=2, srv287321374=1, srv1652780667=0} racks are {rack=0} 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv795533538=2, srv287321374=1, srv1652780667=0} racks are {rack=0} 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1489923209=1, srv1407397201=0, srv409689176=2} racks are {rack=0} 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1489923209=1, srv1407397201=0, srv409689176=2} racks are {rack=0} 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv121520216=0, srv1933402119=2, srv1467810015=1} racks are {rack=0} 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv121520216=0, srv1933402119=2, srv1467810015=1} racks are {rack=0} 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv121520216=0, srv1933402119=2, srv1467810015=1} racks are {rack=0} 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv328101325=2, srv116195493=1, srv115379375=0} racks are {rack=0} 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv328101325=2, srv116195493=1, srv115379375=0} racks are {rack=0} 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv328101325=2, srv116195493=1, srv115379375=0} racks are {rack=0} 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv328101325=2, srv116195493=1, srv115379375=0} racks are {rack=0} 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:14,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv398363030=2, srv195358189=0, srv2007669436=1} racks are {rack=0} 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=3, number of racks=1 2024-11-07T18:27:14,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv875365252=3, srv590043545=1, srv539438744=0, srv697826020=2} racks are {rack=0} 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv875365252=3, srv590043545=1, srv539438744=0, srv697826020=2} racks are {rack=0} 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv875365252=3, srv590043545=1, srv539438744=0, srv697826020=2} racks are {rack=0} 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv684688036=1, srv838832510=3, srv698934111=2, srv684284959=0} racks are {rack=0} 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv684688036=1, srv838832510=3, srv698934111=2, srv684284959=0} racks are {rack=0} 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv684688036=1, srv838832510=3, srv698934111=2, srv684284959=0} racks are {rack=0} 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv684688036=1, srv838832510=3, srv698934111=2, srv684284959=0} racks are {rack=0} 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1782224039=1, srv1548413012=0, srv2122383679=3, srv1830356295=2} racks are {rack=0} 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1782224039=1, srv1548413012=0, srv2122383679=3, srv1830356295=2} racks are {rack=0} 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1782224039=1, srv1548413012=0, srv2122383679=3, srv1830356295=2} racks are {rack=0} 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1782224039=1, srv1548413012=0, srv2122383679=3, srv1830356295=2} racks are {rack=0} 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1782224039=1, srv1548413012=0, srv2122383679=3, srv1830356295=2} racks are {rack=0} 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1448185493=0, srv1566837793=1, srv705622649=3, srv2131462230=2} racks are {rack=0} 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1968649384=1, srv1315046522=0, srv735477004=2, srv837743722=3} racks are {rack=0} 2024-11-07T18:27:14,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv943432187=3, srv155965857=0, srv1677379334=1, srv2133651358=2} racks are {rack=0} 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2048441672=2, srv263042237=3, srv165728184=1, srv1574838617=0} racks are {rack=0} 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1095811938=0, srv1411857610=1, srv510030444=2, srv553997533=3} racks are {rack=0} 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv514234344=3, srv1958544453=2, srv190836526=1, srv1330665168=0} racks are {rack=0} 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv932012028=2, srv976043991=3, srv317449771=0, srv767998270=1} racks are {rack=0} 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=4, number of racks=1 2024-11-07T18:27:14,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv460032389=1, srv117397636=0, srv960203600=4, srv78473462=3, srv539943773=2} racks are {rack=0} 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv460032389=1, srv117397636=0, srv960203600=4, srv78473462=3, srv539943773=2} racks are {rack=0} 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv460032389=1, srv117397636=0, srv960203600=4, srv78473462=3, srv539943773=2} racks are {rack=0} 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv460032389=1, srv117397636=0, srv960203600=4, srv78473462=3, srv539943773=2} racks are {rack=0} 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:14,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-07T18:27:14,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-07T18:27:14,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-07T18:27:14,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-07T18:27:14,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-07T18:27:14,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-07T18:27:14,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-07T18:27:14,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-07T18:27:14,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-07T18:27:14,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-07T18:27:14,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-07T18:27:14,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-07T18:27:14,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-07T18:27:14,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-07T18:27:14,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-07T18:27:14,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-07T18:27:14,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-07T18:27:14,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,585 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,585 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,586 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,586 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-07T18:27:14,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-07T18:27:14,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-07T18:27:14,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-07T18:27:14,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,601 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,602 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,602 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,603 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,603 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-07T18:27:14,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-07T18:27:14,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
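Each functionCost dump above pairs a multiplier with an imbalance for every active cost function, and the skip message reduces them to a single weighted average. The following is a small illustrative computation, assuming the average is simply sum(multiplier * imbalance) / sum(multiplier) over the functions not marked "not needed"; the real StochasticLoadBalancer may weight or filter the functions differently.

public class WeightedImbalanceSketch {
  public static double weightedAverage(double[] multipliers, double[] imbalances) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers copied from one functionCost line above (RegionCountSkew,
    // Move, RackLocality, TableSkew, ReadRequest, WriteRequest, MemStoreSize,
    // StoreFile); every imbalance is 0.0, so the weighted average is 0.0 and
    // the table is skipped.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    System.out.println(weightedAverage(multipliers, imbalances)); // prints 0.0
  }
}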
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
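The BalancerClusterState lines map each of the six server names to a host index and place every host on rack 0, which is why the summary reports number of hosts=6 and number of racks=1. Here is a self-contained sketch, using illustrative names rather than the real HBase class, that rebuilds the same server-to-host and server-to-rack indices from the hostnames in the log.

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterStateIndexSketch {
  public static void main(String[] args) {
    // Server names taken from the "Hosts are {...}" line, ordered by the
    // indices shown there (srv172749983=0 ... srv706090125=5).
    String[] servers = {"srv172749983", "srv1932255493", "srv2005812667",
                        "srv379558464", "srv681066476", "srv706090125"};
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    int[] serverToHost = new int[servers.length];
    int[] serverToRack = new int[servers.length];
    for (int i = 0; i < servers.length; i++) {
      // Each distinct hostname gets its own host index; all hosts sit on rack 0.
      serverToHost[i] = hostIndex.computeIfAbsent(servers[i], h -> hostIndex.size());
      serverToRack[i] = 0;
      System.out.println("server " + i + " is on host " + serverToHost[i]
          + " and rack " + serverToRack[i]);
    }
    System.out.println("number of hosts=" + hostIndex.size() + ", number of racks=1");
  }
}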
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
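The repeated "Slop is less than zero, not checking for sloppiness" DEBUG line means the region-count slop pre-check is disabled for this run. A short sketch of configuring a negative slop follows; the property name hbase.regions.slop is an assumption here and should be verified against the deployed HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SlopSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A negative slop disables the per-server sloppiness pre-check entirely,
    // which is what the DEBUG lines above report for this test run.
    conf.setFloat("hbase.regions.slop", -1.0f);
    System.out.println("slop=" + conf.getFloat("hbase.regions.slop", 0.2f));
  }
}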
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
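Every block above opens with "Start Generate Balance plan for table: ..." and reports "Number of tables=1", so the balancer is being driven once per table rather than over the whole cluster at once. The loop below is a purely illustrative shape for that per-table invocation, not the real LoadBalancer interface.

import java.util.List;
import java.util.Map;

public class PerTablePassSketch {
  // Illustrative stand-in for "generate a balance plan for one table".
  interface TablePlanGenerator {
    List<String> generatePlan(String table, Map<String, List<String>> regionsByServer);
  }

  static void balanceByTable(Map<String, Map<String, List<String>>> loadByTable,
                             TablePlanGenerator generator) {
    for (Map.Entry<String, Map<String, List<String>>> e : loadByTable.entrySet()) {
      // One independent pass per table, mirroring the per-table blocks above;
      // a pass that finds nothing to move corresponds to a "skipping" line.
      List<String> plan = generator.generatePlan(e.getKey(), e.getValue());
      System.out.println("table " + e.getKey() + ": " + plan.size() + " move(s) planned");
    }
  }
}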
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-07T18:27:14,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-07T18:27:14,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-07T18:27:14,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-07T18:27:14,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-07T18:27:14,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-07T18:27:14,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-07T18:27:14,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-07T18:27:14,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-07T18:27:14,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-07T18:27:14,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-07T18:27:14,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-07T18:27:14,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-07T18:27:14,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-07T18:27:14,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-07T18:27:14,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-07T18:27:14,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-07T18:27:14,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-07T18:27:14,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-07T18:27:14,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-07T18:27:14,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-07T18:27:14,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-07T18:27:14,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-07T18:27:14,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-07T18:27:14,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-07T18:27:14,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-07T18:27:14,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,696 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-07T18:27:14,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-07T18:27:14,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-07T18:27:14,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-07T18:27:14,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-07T18:27:14,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-07T18:27:14,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-07T18:27:14,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-07T18:27:14,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-07T18:27:14,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-07T18:27:14,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-07T18:27:14,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-07T18:27:14,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-07T18:27:14,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-07T18:27:14,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-07T18:27:14,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-07T18:27:14,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-07T18:27:14,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-07T18:27:14,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-07T18:27:14,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-07T18:27:14,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-07T18:27:14,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-07T18:27:14,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-07T18:27:14,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-07T18:27:14,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-07T18:27:14,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-07T18:27:14,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-07T18:27:14,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-07T18:27:14,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:14,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:14,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-07T18:27:14,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-07T18:27:14,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:14,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:14,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-07T18:27:14,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-07T18:27:14,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-07T18:27:14,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-07T18:27:14,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-07T18:27:14,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:14,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,827 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-07T18:27:14,827 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-07T18:27:14,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:14,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:14,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-07T18:27:14,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-07T18:27:14,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-07T18:27:14,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-07T18:27:14,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-07T18:27:14,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-07T18:27:14,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,890 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,890 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,891 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-07T18:27:14,891 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,892 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-07T18:27:14,892 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,893 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,893 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,894 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,894 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,895 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,895 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,900 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:14,900 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,901 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,901 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,902 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,902 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,903 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-07T18:27:14,903 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,904 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,904 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,905 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,905 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,906 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,906 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,907 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-07T18:27:14,907 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,908 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,908 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,909 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,909 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,910 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,910 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,913 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,913 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,914 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,914 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,915 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,915 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,916 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,916 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,917 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-07T18:27:14,917 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,918 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-07T18:27:14,918 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:14,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,920 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,921 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,922 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,922 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,923 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,923 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,924 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,925 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-07T18:27:14,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,926 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,926 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,927 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:14,927 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,928 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,928 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,929 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,929 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,930 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-07T18:27:14,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,932 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-07T18:27:14,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,933 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,934 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,935 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,936 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,938 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,938 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,939 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,940 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,940 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,941 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,943 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,944 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,944 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,945 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,946 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,947 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,947 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,949 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,949 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,950 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,950 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,951 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-07T18:27:14,951 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,952 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-07T18:27:14,952 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,955 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,956 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,956 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,957 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,958 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-07T18:27:14,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,959 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,960 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,960 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,961 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,962 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,962 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,963 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,963 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,964 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:14,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,967 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,968 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-07T18:27:14,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,969 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,970 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,971 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-07T18:27:14,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,972 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,973 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,974 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,974 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,975 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,975 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,976 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-07T18:27:14,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,977 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,978 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,978 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,979 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,980 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,981 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,983 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,983 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,984 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-07T18:27:14,984 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,985 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,985 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,986 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,987 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,987 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,989 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,990 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-07T18:27:14,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,991 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,992 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-07T18:27:14,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,993 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,994 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,995 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-07T18:27:14,995 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,996 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,997 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-07T18:27:14,997 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,998 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,998 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:14,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-07T18:27:14,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,000 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,000 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,001 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,002 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-07T18:27:15,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,003 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,004 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,006 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-07T18:27:15,006 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,007 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,007 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,009 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-07T18:27:15,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,011 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,012 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,013 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,014 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,015 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,017 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,017 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,018 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-07T18:27:15,018 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,019 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,019 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,020 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,020 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,021 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-07T18:27:15,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,023 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,024 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-07T18:27:15,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,025 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-07T18:27:15,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,026 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,027 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,028 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,028 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,029 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-07T18:27:15,029 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,030 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,030 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,031 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,032 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,034 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,035 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,036 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2005812667=2, srv379558464=3, srv681066476=4, srv1932255493=1, srv706090125=5, srv172749983=0} racks are {rack=0} 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,037 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,037 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1380 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1381 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,065 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table880 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table880) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1140 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1382 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,066 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table881 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table881) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1141 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1383 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table640 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,067 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table640) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table882 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table882) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1142 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1384 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,068 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table641 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table641) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table883 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table883) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1143 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1385 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,069 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table400 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table642 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table642) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table884 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table884) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,070 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1144 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1386 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table401 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table643 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table643) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,071 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table885 2024-11-07T18:27:15,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table885) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1145 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1387 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table402 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table644 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table644) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table886 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table886) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,073 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table403 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table645 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table645) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table887 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table887) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table404 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,074 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table646 2024-11-07T18:27:15,074 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table646) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table888 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table888) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table405 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table647 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,075 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,075 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table647) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table889 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table889) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table406 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table648 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,076 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table648) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table407 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table649 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table649) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table408 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table409 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1146 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1388 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,078 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1147 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1389 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1148 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1149 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,079 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1370 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,079 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1371 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table870 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table870) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1130 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,080 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1372 2024-11-07T18:27:15,080 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table871 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table871) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,081 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1131 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,081 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1373 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table630 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table630) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table872 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table872) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,082 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1132 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,082 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1374 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table631 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table631) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table873 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table873) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,083 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1133 2024-11-07T18:27:15,083 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1375 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table632 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table632) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table874 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table874) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,084 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1134 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,084 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1376 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table633 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table633) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,085 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table875 2024-11-07T18:27:15,085 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table875) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table634 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table634) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table876 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table876) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table635 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table635) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,086 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table877 2024-11-07T18:27:15,086 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table877) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table636 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table636) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table878 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table878) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table637 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table637) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,087 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table879 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,087 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table879) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table638 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table638) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table639 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table639) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1135 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1377 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1136 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1378 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,089 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1137 2024-11-07T18:27:15,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1379 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1138 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1139 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,090 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table660 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table660) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1360 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table661 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table661) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1361 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,091 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table420 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table662 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table662) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1120 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1362 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,092 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table421 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table663 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table663) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1121 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,093 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1363 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table422 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table664 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table664) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1122 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1364 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table423 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table665 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,095 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table665) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1123 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1365 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table424 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table666 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,096 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table666) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table425 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table667 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table667) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table426 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,097 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table668 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,097 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table668) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table427 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table669 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table669) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,098 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table428 2024-11-07T18:27:15,098 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table429 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1124 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1366 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1125 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1367 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1126 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1368 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,100 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1127 2024-11-07T18:27:15,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1369 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1128 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1129 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,101 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table890 2024-11-07T18:27:15,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table890) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table891 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table891) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table650 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table650) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table892 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,102 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table892) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1350 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table651 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table651) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table893 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table893) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1351 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,103 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table410 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table652 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table652) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table894 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table894) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1110 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,104 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1352 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table411 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table653 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table653) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,105 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table895 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table895) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1111 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1353 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table412 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,106 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table654 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table654) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table896 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table896) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1112 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1354 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,107 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table413 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table655 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table655) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table897 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table897) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,108 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table414 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table656 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table656) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table898 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table898) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table415 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,109 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table657 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table657) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table899 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table899) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table416 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,110 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table658 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table658) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table417 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table659 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table659) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,111 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table418 2024-11-07T18:27:15,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table419 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1113 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1355 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,112 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,112 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1114 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1356 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1115 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1357 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,113 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1116 2024-11-07T18:27:15,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1358 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1117 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,114 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1359 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1118 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1119 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1182 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,115 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1183 2024-11-07T18:27:15,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1184 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1185 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1186 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,116 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1187 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table840 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table840) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1188 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table841 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table841) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1189 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,117 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table600 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table600) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table842 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table842) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table601 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,118 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table601) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table843 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table843) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table602 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table602) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table844 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,119 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table844) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,119 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table603 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table603) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table845 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table845) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table604 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table604) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table846 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table846) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,120 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table605 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,120 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table605) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table847 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table847) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table606 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table606) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table848 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table848) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1180 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table607 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table607) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table849 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table849) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1181 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,122 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table608 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table608) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table609 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table609) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1171 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,123 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,123 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1172 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1173 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1174 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1175 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,124 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,124 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1176 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1177 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table830 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table830) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1178 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,125 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table831 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,125 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table831) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table832 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table832) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table833 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table833) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table834 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,126 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,126 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table834) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table835 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table835) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table836 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table836) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table837 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,127 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table837) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,127 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table838 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table838) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1170 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table839 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table839) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1179 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,128 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1160 2024-11-07T18:27:15,128 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1161 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1162 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1163 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,129 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table860 2024-11-07T18:27:15,129 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table860) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1164 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table861 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table861) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1165 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,130 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,130 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table620 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table620) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table862 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table862) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1166 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table621 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,131 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,131 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table621) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table863 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table863) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1167 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table622 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table622) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table864 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table864) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table623 2024-11-07T18:27:15,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table623) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table865 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table865) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table624 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table624) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table866 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table866) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,133 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table625 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table625) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table867 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table867) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table626 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table626) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table868 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,134 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table868) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table627 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table627) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table869 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table869) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table628 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table628) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table629 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,135 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,135 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table629) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1168 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1169 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1391 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1150 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,136 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,136 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1392 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1151 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1393 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1152 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,137 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1394 2024-11-07T18:27:15,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1153 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1395 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table850 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,138 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table850) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1154 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1396 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table851 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table851) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1155 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,139 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1397 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table610 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table610) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table852 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table852) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1156 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,140 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,140 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1398 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table611 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table611) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table853 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table853) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table612 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table612) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,141 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table854 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,141 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table854) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table613 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table613) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table855 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table855) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table614 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table614) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table856 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table856) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table615 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table615) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table857 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table857) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table616 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,143 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table616) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table858 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table858) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table617 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table617) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table859 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table859) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table618 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,144 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table618) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1390 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table619 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table619) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1157 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1399 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,145 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1158 2024-11-07T18:27:15,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1159 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table240 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,146 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table482 2024-11-07T18:27:15,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table241 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table483 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table242 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table484 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table243 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table485 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,148 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table244 2024-11-07T18:27:15,148 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table486 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table245 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table487 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,149 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table246 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table488 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table247 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,150 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table489 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table248 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table249 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,151 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1308 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1309 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1300 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1301 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1302 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1303 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1304 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table490 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,153 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,153 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1305 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table491 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1306 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table250 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,154 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table492 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1307 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,155 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table471 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table230 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table472 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table231 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,156 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,156 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table473 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table232 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table474 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,157 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table233 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table475 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table234 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table476 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,158 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table235 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table477 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table236 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,159 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table478 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table237 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table479 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,160 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table238 2024-11-07T18:27:15,160 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table239 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table480 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table481 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table262 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table263 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table264 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table265 2024-11-07T18:27:15,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,163 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table266 2024-11-07T18:27:15,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table267 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table268 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,164 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table269 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table270 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table271 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,165 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table272 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table251 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table493 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table252 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,166 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table494 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table253 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table495 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,167 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table254 2024-11-07T18:27:15,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table496 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table255 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table497 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,168 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table256 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table498 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,169 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table257 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,169 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table499 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table258 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table259 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,170 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,170 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table260 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table261 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table680 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table680) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table681 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,171 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table681) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table440 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table682 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table682) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table441 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table683 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table683) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table200 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table442 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table684 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,173 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,173 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table684) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1340 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table201 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table443 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,174 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table685 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table685) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1341 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table202 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table444 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,175 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,175 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table686 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table686) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1100 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1342 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table203 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,176 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,176 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table445 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table687 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table687) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1101 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1343 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,177 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,177 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table204 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table446 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table688 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,178 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table688) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table205 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table447 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table689 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table689) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,179 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table206 2024-11-07T18:27:15,179 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,180 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table448 2024-11-07T18:27:15,180 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,181 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table207 2024-11-07T18:27:15,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table449 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table208 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table209 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,182 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1102 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1344 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1103 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1345 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,183 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1104 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1346 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1105 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,184 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1347 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1106 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1348 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1107 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1349 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,185 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1108 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table690 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table690) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1109 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table670 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,186 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table670) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table671 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table671) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table430 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table672 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table672) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,187 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table431 2024-11-07T18:27:15,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table673 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table673) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table432 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table674 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,188 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table674) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1330 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table433 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table675 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table675) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,189 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1331 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,189 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table434 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table676 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table676) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1332 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,190 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table435 2024-11-07T18:27:15,190 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table677 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table677) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table436 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table678 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,191 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table678) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,191 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table437 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table679 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table679) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table438 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table439 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1333 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1334 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1335 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,193 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1336 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1337 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1338 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1339 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,194 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table460 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table461 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table220 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table462 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,195 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,195 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table221 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table463 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table222 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,196 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,196 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table464 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table223 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table465 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,197 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table224 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table466 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,198 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1320 2024-11-07T18:27:15,198 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table225 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,199 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table467 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,199 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1321 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table226 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table468 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,200 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,200 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table227 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table469 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table228 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table229 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1322 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1323 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1324 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,202 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1325 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1326 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1327 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1328 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,203 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,203 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table470 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1329 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table691 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table691) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table450 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,204 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table692 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table692) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table451 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table693 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,205 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table693) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,205 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table210 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table452 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table694 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table694) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,206 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table211 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table453 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table695 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table695) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table212 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,207 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table454 2024-11-07T18:27:15,207 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table696 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table696) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table213 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table455 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,208 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,208 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table697 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table697) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table214 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table456 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,209 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table698 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,209 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table698) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1310 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table215 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table457 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,210 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table699 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table699) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table216 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table458 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table217 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,211 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table459 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table218 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table219 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,212 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1319 2024-11-07T18:27:15,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1311 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1312 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1313 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,213 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1314 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1315 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1316 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1317 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,214 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1318 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table196 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table197 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,215 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table198 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table199 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table163 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table163) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table164 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,216 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,216 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table164) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table165 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table165) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table166 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table166) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table167 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table167) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,217 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table168 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,217 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table168) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table169 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table169) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table170 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table170) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,218 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table171 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,218 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table171) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table172 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table172) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table173 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table173) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,219 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table394 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table394) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table395 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table395) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table396 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table396) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table155 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table155) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table397 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,221 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,221 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table397) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table156 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table156) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table398 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table398) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table157 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,222 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table157) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,222 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table399 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table399) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table158 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table158) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table159 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table159) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,223 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table160 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table160) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table161 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table161) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table162 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table162) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table185 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,224 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table185) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,224 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table186 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table186) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table187 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table187) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table188 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table188) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,225 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table189 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table189) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table190 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table191 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table192 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,226 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,226 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table193 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table194 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table195 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,227 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,227 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table174 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table174) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table175 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table175) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table176 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table176) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,228 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table177 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,228 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table177) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table178 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table178) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table179 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table179) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table180 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,229 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table180) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,229 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table181 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table181) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table182 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table182) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table183 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table183) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table184 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table184) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table800 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table800) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table801 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table801) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table802 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,231 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table802) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table803 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table803) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table804 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table804) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table805 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table805) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,232 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table806 2024-11-07T18:27:15,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table806) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table807 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table807) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table808 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table808) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table809 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table809) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,233 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table820 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table820) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table821 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table821) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table822 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table822) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table823 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table823) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,234 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table824 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table824) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table825 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table825) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table826 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table826) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table827 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table827) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,235 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table828 2024-11-07T18:27:15,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table828) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table829 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table829) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1193 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1193) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1194 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,236 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,236 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1194) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1195 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1195) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1196 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1196) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1197 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1197) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1198 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1198) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,237 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1199 2024-11-07T18:27:15,237 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1199) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table810 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table810) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table811 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table811) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table812 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table812) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,238 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table813 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,238 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table813) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table814 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table814) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1190 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1190) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table815 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table815) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1191 2024-11-07T18:27:15,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1191) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table816 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table816) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1192 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1192) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table817 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table817) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,240 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table818 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table818) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table819 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table819) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1260 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1260) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1261 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1261) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,241 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table760 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table760) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1020 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1020) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1262 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1262) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table761 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table761) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1021 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,242 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1021) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1263 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1263) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table520 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table520) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table762 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table762) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,243 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1022 2024-11-07T18:27:15,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1022) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1264 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1264) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table521 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table521) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table763 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table763) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,244 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1023 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1023) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1265 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1265) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table522 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table522) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table764 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table764) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1024 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,245 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1024) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1266 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1266) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table523 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table523) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table765 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table765) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table524 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,246 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table524) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table766 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table766) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table525 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table525) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table767 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table767) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,247 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table526 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,247 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table526) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table768 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table768) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table527 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table527) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table769 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table769) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table528 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table528) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table529 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table529) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,249 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:15,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1025 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1025) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1267 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,250 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1267) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,250 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1026 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1026) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1268 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1268) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,251 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:15,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1027 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1027) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1269 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1269) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,252 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1028 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1028) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1029 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1029) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,253 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1490 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1490) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1491 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1491) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,254 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table990 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,254 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table990) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1250 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1250) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1492 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1492) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table991 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table991) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,255 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1251 2024-11-07T18:27:15,255 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1251) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1493 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1493) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table750 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table750) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table992 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table992) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,256 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1010 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,256 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1010) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1252 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1252) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1494 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1494) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table751 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table751) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table993 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table993) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1011 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1011) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1253 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1253) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1495 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1495) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,258 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table510 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table510) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table752 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table752) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table994 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table994) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1012 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1012) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,259 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1254 2024-11-07T18:27:15,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1254) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1496 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1496) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table511 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table511) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table753 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table753) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,260 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table995 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table995) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1013 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1013) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1255 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1255) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1497 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1497) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table512 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,261 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table512) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table754 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table754) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table996 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table996) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table513 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table513) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table755 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table755) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table997 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table997) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table514 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table514) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table756 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table756) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,263 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table998 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,263 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table998) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table515 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table515) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table757 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table757) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table999 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table999) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,264 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table516 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,264 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table516) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table758 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table758) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table517 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table517) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table759 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table759) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table518 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,265 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table518) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table519 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table519) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:15,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1014 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1014) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1256 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1256) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1498 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,267 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1498) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1015 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1015) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1257 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1257) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1499 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,268 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1499) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1016 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1016) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1258 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1258) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,269 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1017 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1017) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1259 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1259) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,270 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1018 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1018) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1019 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1019) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table780 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table780) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,272 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1480 2024-11-07T18:27:15,272 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1480) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table781 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table781) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1481 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1481) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table540 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,273 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table540) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table782 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table782) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1240 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1240) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1482 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1482) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,274 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table541 2024-11-07T18:27:15,274 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table541) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table783 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table783) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1241 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1241) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,275 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1483 2024-11-07T18:27:15,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1483) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table300 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table300) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table542 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table542) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table784 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table784) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1000 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1000) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1242 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1242) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1484 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,277 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,277 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1484) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table301 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table301) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table543 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table543) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table785 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table785) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,278 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1001 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1001) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1243 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1243) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1485 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1485) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table302 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,279 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table302) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table544 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table544) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table786 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table786) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1002 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1002) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1244 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1244) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1486 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1486) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table303 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table303) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,281 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table545 2024-11-07T18:27:15,281 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table545) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table787 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table787) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table304 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table304) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,282 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table546 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,282 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table546) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table788 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table788) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table305 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table305) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table547 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,283 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table547) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
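[editor's note] Each of these log lines ends with the same tuning hint: either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or raise the multiplier of the cost function you care about. A hedged sketch of how that could be applied in a test or client configuration follows; only the minCostNeedBalance key is taken verbatim from the log, while hbase.master.balancer.stochastic.regionCountCost is an assumed/typical key for the RegionCountSkewCostFunction weight (its default of 500.0 matches the multiplier logged above) and should be verified against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch of the tuning suggested by the StochasticLoadBalancer log message.
public class BalancerTuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Verbatim from the log: lower the threshold so smaller weighted-average
        // imbalances are enough to trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed key (verify for your release): raise the relative weight of
        // region-count skew above its logged default of 500.0.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```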
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table789 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table789) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table306 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table306) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table548 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table548) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table307 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table307) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table549 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table549) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table308 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,285 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table308) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table309 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table309) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,286 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1003 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1003) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1245 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1245) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1487 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,287 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1487) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,287 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1004 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1004) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1246 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1246) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,288 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1488 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,288 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1488) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1005 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1005) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,289 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1247 2024-11-07T18:27:15,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1247) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1489 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1489) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,290 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1006 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,290 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1006) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1248 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1248) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1007 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,291 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,291 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1007) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1249 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1249) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1008 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1008) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,292 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:15,292 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1009 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1009) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,293 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:15,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table770 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,294 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table770) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1470 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1470) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table771 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table771) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1471 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1471) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,295 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table530 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table530) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table772 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table772) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1230 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1230) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1472 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,296 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1472) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table531 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table531) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table773 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table773) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1231 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1231) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,297 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1473 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1473) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table532 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table532) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table774 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table774) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1232 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1232) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1474 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1474) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table533 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table533) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,299 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table775 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,299 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table775) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1233 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1233) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1475 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1475) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table534 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,300 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,300 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table534) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table776 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table776) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table535 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table535) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table777 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table777) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,301 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table536 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,301 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table536) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table778 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table778) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table537 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table537) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table779 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,302 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table779) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table538 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table538) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table539 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table539) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,303 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1234 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1234) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1476 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1476) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,304 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,304 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1235 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1235) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1477 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1477) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,305 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1236 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1236) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1478 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1478) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1237 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,306 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1237) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1479 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1479) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1238 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,307 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,307 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1238) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1239 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1239) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,308 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1061 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1061) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1062 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,309 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1062) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,309 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1063 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1063) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1064 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1064) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1065 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1065) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table960 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,310 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table960) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1066 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1066) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table961 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table961) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1067 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,311 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1067) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,311 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table720 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table720) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table962 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table962) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1068 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1068) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table721 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,312 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table721) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table963 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table963) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table722 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table722) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table964 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,313 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table964) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table723 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table723) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table965 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table965) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table724 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,314 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,314 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table724) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table966 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table966) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table725 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table725) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table967 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table967) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,315 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table726 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,315 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table726) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table968 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table968) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table727 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table727) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table969 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,316 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,316 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table969) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table728 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table728) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1060 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1060) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table729 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table729) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,317 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1069 2024-11-07T18:27:15,317 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1069) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1050 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1050) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1292 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1051 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,318 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,318 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1051) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1293 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1052 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1052) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1294 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,319 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,319 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1053 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1053) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1295 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1054 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1054) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1296 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1055 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1055) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1297 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table950 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table950) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1056 2024-11-07T18:27:15,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1056) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1298 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table951 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table951) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,322 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1057 2024-11-07T18:27:15,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1057) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1299 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table710 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table710) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table952 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,323 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table952) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table711 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table711) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table953 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table953) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table712 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table712) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,324 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table954 2024-11-07T18:27:15,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table954) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table713 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table713) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table955 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table955) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table714 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table714) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table956 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table956) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table715 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table715) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table957 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,326 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table957) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table716 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table716) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table958 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table958) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1290 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,327 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table717 2024-11-07T18:27:15,327 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table717) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table959 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table959) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1291 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table718 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,328 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table718) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table719 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table719) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1058 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1058) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1059 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,329 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1059) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1281 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1040 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1040) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1282 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1041 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1041) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1283 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table980 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table980) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,331 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1042 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1042) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1284 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table981 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table981) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1043 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,332 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1043) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1285 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table740 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table740) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table982 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table982) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,333 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1044 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1044) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1286 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table741 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table741) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table983 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table983) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1045 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1045) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1287 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table500 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,335 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table500) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table742 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table742) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table984 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table984) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1046 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,336 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1046) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1288 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table501 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table501) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table743 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table743) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,337 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table985 2024-11-07T18:27:15,337 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table985) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table502 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table502) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table744 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table744) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table986 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table986) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table503 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table503) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table745 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table745) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table987 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table987) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table504 2024-11-07T18:27:15,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table504) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table746 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table746) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table988 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table988) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,340 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table505 2024-11-07T18:27:15,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table505) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table747 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table747) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table989 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table989) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table506 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,341 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table506) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table748 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table748) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table507 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table507) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table749 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table749) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,342 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table508 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table508) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1280 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table509 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table509) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1047 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1047) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1289 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1048 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1048) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1049 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,344 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1049) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,344 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1270 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1270) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1271 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1271) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,345 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1030 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,345 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1030) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1272 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1272) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1031 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1031) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1273 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,346 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table970 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table970) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1032 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1032) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1274 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,347 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,347 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table971 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table971) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1033 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1033) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1275 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table730 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table730) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table972 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table972) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1034 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1034) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,349 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1276 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table731 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table731) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table973 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,350 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table973) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1035 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1035) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1277 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table732 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table732) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,351 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table974 2024-11-07T18:27:15,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table974) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table733 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table733) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table975 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table975) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table734 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table734) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table976 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table976) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table735 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table735) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table977 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table977) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,353 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table736 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,353 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table736) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table978 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table978) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table737 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table737) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,354 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table979 2024-11-07T18:27:15,354 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table979) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table738 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table738) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table739 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table739) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,355 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1036 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,355 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1036) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1278 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1037 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1037) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1279 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1038 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1038) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1039 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1039) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,357 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table361 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table361) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,358 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table362 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table362) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,359 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:15,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table363 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table363) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,360 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table364 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table364) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,361 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table365 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table365) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,362 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,362 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table366 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,363 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table366) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,363 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table367 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,364 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table367) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table368 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table368) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1420 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1420) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table369 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,366 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table369) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1429 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,367 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1429) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1421 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1421) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1422 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1422) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1423 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1423) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,368 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1424 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1424) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1425 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1425) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1426 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,369 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1426) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table370 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,370 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table370) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,370 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1427 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1427) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table371 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,371 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table371) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1428 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1428) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table350 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table350) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table592 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,372 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,372 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table592) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table351 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table351) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table593 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table593) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,373 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table352 2024-11-07T18:27:15,373 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table352) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table594 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table594) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table353 2024-11-07T18:27:15,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table353) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table595 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table595) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,375 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,375 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table354 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table354) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table596 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table596) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,376 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table355 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table355) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table597 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table597) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,377 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table356 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table356) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table598 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table598) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,378 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:15,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table357 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table357) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table599 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table599) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,379 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table358 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table358) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,380 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table359 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,380 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table359) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,381 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:15,381 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1418 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1418) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1419 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1419) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1410 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,382 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1410) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1411 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1411) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1412 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1412) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1413 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1413) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,383 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1414 2024-11-07T18:27:15,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1414) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1415 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1415) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1416 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1416) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table360 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,384 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table360) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1417 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1417) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table383 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,385 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table383) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table384 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,386 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,386 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table384) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table385 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table385) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,387 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,387 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table386 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table386) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,388 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table387 2024-11-07T18:27:15,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table387) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table388 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table388) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,389 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-07T18:27:15,389 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table389 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table389) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,390 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-07T18:27:15,390 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1407 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,391 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,391 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1407) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1408 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1408) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1409 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1409) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,393 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1400 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1400) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1401 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,394 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1401) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,394 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1402 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1402) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table390 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table390) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,395 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1403 2024-11-07T18:27:15,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1403) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table391 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,396 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,396 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table391) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1404 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1404) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,397 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table392 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table392) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1405 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1405) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,398 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,398 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table393 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table393) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1406 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,399 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1406) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table372 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table372) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,400 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-07T18:27:15,400 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table373 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table373) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table374 2024-11-07T18:27:15,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table374) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,402 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table375 2024-11-07T18:27:15,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table375) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table376 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,403 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table376) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table377 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table377) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,404 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table378 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table378) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,405 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table379 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table379) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,406 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table380 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table380) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table381 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table381) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,407 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table382 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,407 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table382) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,408 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:15,408 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table560 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table560) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table561 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,409 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,409 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table561) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table320 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table320) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table562 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table562) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1460 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1460) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table321 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table321) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table563 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table563) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1461 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1461) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table322 2024-11-07T18:27:15,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table322) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table564 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table564) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1220 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1220) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1462 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,412 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1462) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table323 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table323) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table565 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,413 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table565) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1221 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1221) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1463 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1463) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table324 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,414 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,414 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table324) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table566 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table566) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1222 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1222) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1464 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1464) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,415 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table325 2024-11-07T18:27:15,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table325) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table567 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table567) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table326 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table326) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table568 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table568) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table327 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table327) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table569 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table569) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table328 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,417 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table328) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table329 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table329) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1223 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,418 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1223) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1465 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1465) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1224 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,419 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1224) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,419 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1466 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1466) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1225 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1225) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1467 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1467) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1226 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1226) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1468 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,421 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1468) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1227 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1227) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1469 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,422 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1469) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,422 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1228 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1228) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,423 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1229 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1229) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,424 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table790 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table790) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,425 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table791 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table791) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table550 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table550) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table792 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,426 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table792) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table551 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table551) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table793 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,427 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table793) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table310 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table310) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table552 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table552) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,428 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table794 2024-11-07T18:27:15,428 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table794) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1450 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1450) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table311 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table311) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,429 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table553 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table553) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table795 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table795) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1451 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1451) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table312 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,430 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table312) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table554 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table554) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table796 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table796) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
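The BalancerClusterState DEBUG lines repeat the same cluster snapshot for every table: six servers mapped one-to-one onto six hosts, all on a single rack. The following hedged sketch reconstructs that indexing with plain Java collections; the field names and arrays are illustrative assumptions, not BalancerClusterState's actual data structures.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative reconstruction of the snapshot logged as
// "Hosts are {srv...=N, ...} racks are {rack=0}", "server N is on host N",
// and "server N is on rack 0".
public class ClusterSnapshot {
    public static void main(String[] args) {
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        hostIndex.put("srv115189941", 0);
        hostIndex.put("srv1723660840", 1);
        hostIndex.put("srv1886509587", 2);
        hostIndex.put("srv366684952", 3);
        hostIndex.put("srv383274334", 4);
        hostIndex.put("srv67023221", 5);

        int rackCount = 1;                                  // only "rack=0" in this test
        int[] serverToHost = {0, 1, 2, 3, 4, 5};            // each server on its own host
        int[] serverToRack = new int[serverToHost.length];  // all zeros: one shared rack

        for (int server = 0; server < serverToHost.length; server++) {
            System.out.printf("server %d is on host %d%n", server, serverToHost[server]);
        }
        for (int server = 0; server < serverToRack.length; server++) {
            System.out.printf("server %d is on rack %d%n", server, serverToRack[server]);
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackCount);
    }
}
```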
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1210 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,431 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1210) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1452 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1452) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table313 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table313) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table555 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,432 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table555) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table797 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table797) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1211 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1211) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,433 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1453 2024-11-07T18:27:15,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1453) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table314 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table314) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table556 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table556) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,434 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table798 2024-11-07T18:27:15,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table798) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table315 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table315) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table557 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table557) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table799 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,435 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table799) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table316 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table316) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table558 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table558) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table317 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,436 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table317) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table559 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table559) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table318 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table318) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
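Note: the BalancerClusterState DEBUG/INFO lines above show the six test servers each mapped to its own host index and all placed on the single default rack. A small illustrative sketch (assumed names, not HBase's BalancerClusterState; output order is simplified) of building those server-to-host and server-to-rack indexes:

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterStateSketch {
    public static void main(String[] args) {
        // Server names and index order taken from the "Hosts are {...}" line above.
        String[] servers = {"srv115189941", "srv1723660840", "srv1886509587",
                            "srv366684952", "srv383274334", "srv67023221"};

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();

        for (int serverId = 0; serverId < servers.length; serverId++) {
            // Each test server runs on its own host...
            int host = hostIndex.computeIfAbsent(servers[serverId], h -> hostIndex.size());
            // ...but every host sits on the one rack named "rack", as in the log.
            int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
            System.out.printf("server %d is on host %d%n", serverId, host);
            System.out.printf("server %d is on rack %d%n", serverId, rack);
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=%d%n",
            hostIndex.size(), rackIndex.size());
    }
}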
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table319 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,437 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table319) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1212 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1212) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1454 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1454) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,438 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1213 2024-11-07T18:27:15,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1213) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1455 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1455) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1214 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1214) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1456 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1456) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,440 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1215 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1215) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1457 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1457) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1216 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,441 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1216) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1458 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1458) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1217 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,442 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,442 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1217) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1459 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1459) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,443 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1218 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1218) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1219 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1219) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,444 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table581 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table581) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,445 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table340 2024-11-07T18:27:15,445 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table340) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table582 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table582) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,446 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table341 2024-11-07T18:27:15,446 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table341) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table583 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table583) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,447 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:15,447 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table342 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table342) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table584 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table584) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table343 2024-11-07T18:27:15,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table343) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table585 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table585) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,450 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table344 2024-11-07T18:27:15,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table344) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table586 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table586) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1440 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1440) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,451 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table345 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table345) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table587 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table587) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1441 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,452 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1441) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table346 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table346) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table588 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,453 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,453 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table588) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1200 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1200) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1442 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1442) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,454 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:15,454 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table347 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table347) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table589 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table589) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,455 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:15,455 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table348 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table348) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,456 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table349 2024-11-07T18:27:15,456 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table349) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1209 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1209) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1201 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1201) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1443 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1443) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,458 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1202 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1202) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1444 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1444) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1203 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1203) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1445 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1445) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1204 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,460 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1204) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,460 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1446 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1446) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1205 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1205) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,461 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1447 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1447) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1206 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1206) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1448 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1448) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table590 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table590) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1207 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,463 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,463 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1207) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1449 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1449) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,464 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table591 2024-11-07T18:27:15,464 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table591) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1208 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1208) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,465 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table570 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,465 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table570) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table571 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table571) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,466 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table330 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table330) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table572 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table572) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table331 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table331) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table573 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table573) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,468 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table332 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table332) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table574 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,469 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table574) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table333 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table333) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,470 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table575 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table575) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table334 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table334) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table576 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table576) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1430 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1430) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table335 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table335) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table577 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table577) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1431 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,472 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,472 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1431) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table336 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table336) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table578 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table578) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table337 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table337) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,473 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table579 2024-11-07T18:27:15,473 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table579) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table338 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table338) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table339 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,474 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table339) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1432 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1432) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1433 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1433) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1434 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1434) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1435 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1435) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1436 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1436) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1437 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1437) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,477 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1438 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1438) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table580 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table580) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,478 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1439 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1439) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table284 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table284) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table285 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,479 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,479 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table285) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table286 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table286) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table287 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table287) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,480 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table288 2024-11-07T18:27:15,480 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table288) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table289 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table289) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table290 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table290) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,481 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table291 2024-11-07T18:27:15,481 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table291) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table292 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table292) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table293 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,482 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table293) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,482 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table294 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table294) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table273 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table273) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table274 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,483 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table274) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,483 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table275 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table275) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table276 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table276) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table277 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,484 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,484 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table277) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table278 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table278) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table279 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,485 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,485 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table279) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table280 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table280) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table281 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table281) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table282 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,486 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,486 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table282) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table283 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table283) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table295 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table295) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table296 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,487 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,487 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table296) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table297 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table297) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table298 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table298) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table299 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,488 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,488 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table299) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table920 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table920) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table921 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table921) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table922 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table922) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,489 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table923 2024-11-07T18:27:15,489 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table923) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table924 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table924) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table925 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,490 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,490 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table925) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table926 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table926) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table927 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table927) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table928 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,491 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,491 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table928) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table929 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table929) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1094 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1094) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1095 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1095) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1096 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,492 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,492 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1096) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1097 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1097) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1098 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1098) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1099 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,493 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1099) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,493 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table910 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table910) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table911 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table911) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table912 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table912) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,494 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1090 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,494 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1090) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table913 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table913) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1091 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,495 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,495 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1091) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table914 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table914) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1092 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1092) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table915 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table915) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,496 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1093 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,496 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1093) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table916 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table916) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table917 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table917) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table918 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,497 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,497 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table918) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table919 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table919) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1083 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1083) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1084 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1084) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,498 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1085 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,498 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1085) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1086 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1086) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1087 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1087) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1088 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,499 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,499 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1088) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1089 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1089) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table940 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,500 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,500 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table940) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table941 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table941) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table700 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table700) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table942 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,501 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,501 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table942) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table701 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table701) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table943 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table943) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table702 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table702) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,502 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table944 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,502 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table944) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table703 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table703) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table945 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table945) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table704 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,503 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,503 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table704) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table946 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table946) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1080 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1080) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table705 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,504 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,504 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table705) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table947 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table947) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1081 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1081) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,505 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table706 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,505 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table706) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table948 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table948) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1082 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1082) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table707 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,506 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,506 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table707) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table949 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table949) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table708 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table708) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table709 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table709) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1072 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,507 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,507 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1072) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1073 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1073) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1074 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1074) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1075 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1075) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,508 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1076 2024-11-07T18:27:15,508 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1076) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1077 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1077) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1078 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,509 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,509 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1078) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1079 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1079) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table930 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table930) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table931 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table931) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,510 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table932 2024-11-07T18:27:15,510 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table932) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table933 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table933) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table934 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table934) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table935 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,511 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,511 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table935) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table936 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table936) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1070 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1070) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table937 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table937) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1071 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,512 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,512 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1071) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table938 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table938) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table939 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table939) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table900 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table900) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,513 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table901 2024-11-07T18:27:15,513 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table901) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table902 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table902) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table903 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,514 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,514 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table903) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table904 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table904) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table905 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table905) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table906 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table906) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,515 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table907 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,515 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table907) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table908 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table908) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table909 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1723660840=1, srv366684952=3, srv1886509587=2, srv115189941=0, srv67023221=5, srv383274334=4} racks are {rack=0} 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=6, number of racks=1 2024-11-07T18:27:15,516 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,516 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table909) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
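Editorial note: each block above reports, per cost function, a multiplier and an imbalance, and the balancer then compares a "weighted average imbalance" against the threshold(1.0) (hbase.master.balancer.stochastic.minCostNeedBalance). The following is a minimal Java sketch, assuming the weighted average is simply the multiplier-weighted mean over the cost functions that are "needed"; it is an illustration of how the logged value could be derived from the numbers in the functionCost= line, not the actual StochasticLoadBalancer implementation.

// Illustrative sketch only (not HBase source code): derive a weighted average
// imbalance from the (multiplier, imbalance) pairs logged above and compare it
// against the minCostNeedBalance threshold, mirroring the INFO decision above.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // Values copied from the functionCost= line above; "(not needed)"
        // functions are omitted from the average in this sketch.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double threshold = 1.0; // minCostNeedBalance used in this test run

        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            weightTotal += multipliers[i];
        }
        double weightedAverageImbalance =
            weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;

        System.out.printf("weighted average imbalance=%.1f, threshold=%.1f%n",
            weightedAverageImbalance, threshold);
        if (weightedAverageImbalance <= threshold) {
            // Matches the "skipping load balancing" INFO message in the log.
            System.out.println("skip load balancing");
        } else {
            System.out.println("generate a balance plan");
        }
    }
}

With every imbalance logged as 0.0, the weighted average is 0.0 regardless of the multipliers, which is why every table in this run is skipped.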
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,517 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,517 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,518 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,518 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,518 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,519 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,519 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,519 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,520 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,520 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,520 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,521 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,521 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,522 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,522 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,522 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,523 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,523 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,523 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,524 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,524 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,524 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,525 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,525 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,525 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,526 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,526 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,527 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv374934397=13, srv153811581=5, srv1254816566=0, srv2103249459=12, srv871355214=14, srv1729415633=6, srv1458730477=3, srv1446715537=2, srv1926746760=11, srv1853445901=10, srv1476753624=4, srv1736785213=7, srv1800367277=9, srv1382044164=1, srv175915361=8} racks are {rack=0} 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,527 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,527 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=15, number of racks=1 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 
2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,528 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,528 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,529 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,529 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,529 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,530 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,530 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
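[Editor's note] The StochasticLoadBalancer(421) message repeated above explains why every per-table balance request in this run is skipped: the weighted average imbalance (0.0) never exceeds hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0). As a hedged sketch only, not part of this test output, the Java snippet below shows one way that threshold could be lowered through the standard Hadoop/HBase Configuration API; the class name and the 0.05 value are illustrative assumptions, while the property key is taken verbatim from the log message.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerBalanceThreshold {
  public static void main(String[] args) {
    // Start from the default HBase configuration (hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // Property named verbatim in the log message above; default is 1.0.
    // 0.05f is an arbitrary illustrative value, not taken from this run.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Print the effective value to confirm the override.
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}

The same override could equally be applied in hbase-site.xml on the master; the programmatic form is shown here only because this log comes from a Java test harness.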
2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,531 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,531 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,531 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv251943523=5, srv677223158=7, srv1374547485=1, srv810605406=8, srv903168122=9, srv2022269786=4, srv180942508=2, srv302236070=6, srv1091762660=0, srv1875750666=3} racks are {rack=0} 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,532 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,532 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,533 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,533 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,534 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,534 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,534 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv145502323=2, srv144555443=1, srv2121753378=4, srv384371779=7, srv1260882196=0, srv449432685=8, srv157138757=3, srv244568735=5, srv251418472=6, srv517487779=9} racks are {rack=0} 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,535 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,535 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
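[Editor's note] The functionCost dump above lists the per-function multipliers the balancer weighs (RegionCountSkewCostFunction 500.0, TableSkewCostFunction 35.0, MoveCostFunction 7.0, and so on). The other option the log message suggests, raising the relative multiplier of a specific cost function, is sketched below under stated assumptions: the configuration key for the region-count multiplier is assumed to be hbase.master.balancer.stochastic.regionCountCost, and the 1000.0 value is an arbitrary illustration, not a recommendation from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RaiseRegionCountWeight {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Assumed key for the RegionCountSkewCostFunction multiplier
    // (shown as multiplier=500.0 in the functionCost dump above).
    // 1000.0f is an illustrative value chosen to outweigh the other cost functions.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
  }
}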
2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,536 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,536 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,537 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,537 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,537 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,538 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,538 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,539 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,539 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,539 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,540 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,540 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,541 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,541 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,541 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,542 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,542 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,543 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,543 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,543 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
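[editor's note] The repeated "skipping load balancing" entries above report a weighted average imbalance derived from the per-cost-function multipliers and imbalances in the functionCost= list, compared against the threshold(1.0) set by hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below is a minimal, self-contained illustration of that arithmetic, assuming the weighted average is the sum of multiplier*imbalance divided by the sum of multipliers; it is not the actual StochasticLoadBalancer code, and the class name is made up for illustration.

import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // Multipliers and imbalances as printed in the functionCost= list above.
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functions.put("MoveCostFunction", new double[] {7.0, 0.0});
    functions.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    functions.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    functions.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    functions.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
    functions.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
    functions.put("StoreFileCostFunction", new double[] {5.0, 0.0});

    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] mi : functions.values()) {
      weightedSum += mi[0] * mi[1];   // multiplier * imbalance
      multiplierSum += mi[0];
    }
    double weightedAverage = multiplierSum == 0 ? 0.0 : weightedSum / multiplierSum;

    double minCostNeedBalance = 1.0;  // threshold(1.0) reported in the log above
    if (weightedAverage <= minCostNeedBalance) {
      System.out.printf("skip balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
          weightedAverage, minCostNeedBalance);
    } else {
      System.out.println("generate a balance plan");
    }
  }
}

With every imbalance at 0.0, the weighted average is 0.0, which is why each table in this run is skipped.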
2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,544 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,544 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,545 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,545 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,545 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
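[editor's note] The "Hosts are {...} racks are {rack=0}" and "server N is on host N / rack 0" lines above record how BalancerClusterState indexes the test cluster's topology: each of the ten servers maps to its own host, and all of them sit on the single rack. The sketch below only illustrates that bookkeeping with the same host-to-index map printed in the log; the class and field names are hypothetical, not the real BalancerClusterState internals.

import java.util.Map;
import java.util.TreeMap;

public class ClusterTopologySketch {
  public static void main(String[] args) {
    // Host names in index order, matching the "Hosts are {...}" mapping above.
    String[] hosts = {"srv1275360649", "srv1356514650", "srv147827216", "srv2000348942",
        "srv263704673", "srv282822185", "srv485714015", "srv826601004", "srv8699761",
        "srv946549049"};
    Map<String, Integer> hostIndex = new TreeMap<>();
    for (int i = 0; i < hosts.length; i++) {
      hostIndex.put(hosts[i], i);
    }

    int numServers = hosts.length;
    int[] serverToHost = new int[numServers];
    int[] serverToRack = new int[numServers];
    for (int server = 0; server < numServers; server++) {
      serverToHost[server] = hostIndex.get(hosts[server]); // one server per host in this test
      serverToRack[server] = 0;                            // single rack: {rack=0}
      System.out.println("server " + server + " is on host " + serverToHost[server]
          + " and rack " + serverToRack[server]);
    }
    System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
  }
}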
2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,546 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,546 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,547 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,547 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
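[editor's note] The log advice above suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 if more aggressive balancing is wanted. The snippet below is a sketch of setting that property programmatically, as a test might; 0.05f is purely an illustrative value, and in a real deployment the property would normally go into hbase-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerBalancerThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Threshold the StochasticLoadBalancer compares the weighted imbalance against.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}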
2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,549 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,549 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,550 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,551 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,552 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,552 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,553 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,554 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,554 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
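Every "skipping load balancing" entry above points at the same knob, hbase.master.balancer.stochastic.minCostNeedBalance, which the message says sits at 1.0 in this run. As a hedged sketch (not taken from this test's code), lowering that threshold through the ordinary Hadoop/HBase Configuration API would look roughly like this; the value 0.05 is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerMinCostNeedBalance {
  public static void main(String[] args) {
    // Start from the usual HBase configuration (hbase-site.xml etc.).
    Configuration conf = HBaseConfiguration.create();
    // The log reports threshold(1.0); lowering the property makes the
    // StochasticLoadBalancer act on smaller weighted average imbalances.
    // 0.05f is an illustrative value, not one taken from this test run.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
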
2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,556 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,556 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv826601004=7, srv147827216=2, srv485714015=6, srv2000348942=3, srv282822185=5, srv1275360649=0, srv1356514650=1, srv263704673=4, srv946549049=9, srv8699761=8} racks are {rack=0} 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
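The "weighted average imbalance=0.0" figure in these entries can be read straight off the functionCost breakdown that follows it. Assuming it is the multiplier-weighted mean of the per-function imbalance values (an interpretation of the log output, not code copied from HBase), a minimal sketch reproduces the comparison against the logged threshold:

public class WeightedImbalance {
  public static void main(String[] args) {
    // (multiplier, imbalance) pairs as printed for table13 above; the
    // "(not needed)" cost functions are skipped, mirroring the log line.
    double[][] costs = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0, 0.0},   // MoveCostFunction
      {15.0, 0.0},  // RackLocalityCostFunction
      {35.0, 0.0},  // TableSkewCostFunction
      {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0}, {5.0, 0.0} // Read/Write/MemStore/StoreFile
    };
    double weightedSum = 0.0;
    double multiplierSum = 0.0;
    for (double[] c : costs) {
      weightedSum += c[0] * c[1];
      multiplierSum += c[0];
    }
    double weightedAverage = weightedSum / multiplierSum; // 0.0 for the values above
    double threshold = 1.0; // minCostNeedBalance as logged
    // Matches the log: 0.0 <= 1.0, so no balance plan is generated for the table.
    System.out.println(weightedAverage <= threshold);
  }
}
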
2024-11-07T18:27:15,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,559 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
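The log message also offers the alternative of raising the relative multiplier of a specific cost function rather than lowering the threshold. A hedged sketch of that option is below; the multiplier property names are the commonly documented StochasticLoadBalancer keys and should be treated as assumptions here, since only minCostNeedBalance appears verbatim in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RaiseCostMultipliers {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Logged multipliers for comparison: RegionCountSkew=500, TableSkew=35, Move=7.
    // Doubling a multiplier gives that cost function more weight in the
    // weighted average the balancer compares against minCostNeedBalance.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f); // assumed key
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);     // assumed key
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 14f);          // assumed key
    // Print only the balancer-related overrides that were just set.
    conf.forEach(e -> {
      if (e.getKey().startsWith("hbase.master.balancer.stochastic.")) {
        System.out.println(e.getKey() + " = " + e.getValue());
      }
    });
  }
}
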
2024-11-07T18:27:15,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,561 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,562 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,563 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,564 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,565 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,565 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,565 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,566 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,566 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,567 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,567 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,568 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,568 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,568 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,569 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,569 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,570 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,570 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,571 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,572 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,572 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,572 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,573 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,573 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,574 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,575 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,575 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,575 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,577 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,577 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,578 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,578 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,578 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,579 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,579 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,580 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,580 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
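
For context on the skip decision recorded above: the StochasticLoadBalancer messages compare a weighted average of the per-cost-function imbalances against hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 in this run). The snippet below is a minimal sketch of that arithmetic, assuming the weighted average is the multiplier-weighted mean of the imbalances printed in the functionCost line; it only illustrates the logged numbers and is not the actual StochasticLoadBalancer implementation.

    // Toy recomputation of the "weighted average imbalance" shown in the log.
    // Multipliers are copied from the functionCost line above; every imbalance
    // is 0.0 in this run, so the weighted average is trivially 0.0 <= 1.0 and
    // the balancer skips the table. (Assumption: multiplier-weighted mean.)
    public class WeightedImbalanceSketch {
      public static void main(String[] args) {
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // threshold(1.0) quoted from the log

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weightedSum += multipliers[i] * imbalances[i];
          multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum;

        if (weightedAverageImbalance <= minCostNeedBalance) {
          System.out.println("skipping load balancing because weighted average imbalance="
              + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
      }
    }
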
2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,581 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,581 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,581 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,582 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,582 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,583 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,583 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,584 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
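
The BalancerClusterState records above enumerate how each server is assigned a host index and a rack index before the cost functions run; with ten single-server hosts on one rack, the mapping is the identity seen in the log ("server N is on host N", "server N is on rack 0"). Below is a small, self-contained sketch of that bookkeeping under those assumptions; the class and field names are illustrative only and are not the real BalancerClusterState internals.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative host/rack index bookkeeping matching the log: ten servers,
    // one server per host, and a single rack named "rack" with index 0.
    // Server names and their indices are copied from the "Hosts are {...}" record.
    public class ClusterStateIndexSketch {
      public static void main(String[] args) {
        String[] servers = {
          "srv1213815980", "srv1248380965", "srv1517239298", "srv1586974619",
          "srv1820782527", "srv18940077", "srv2090623864", "srv307793679",
          "srv727139671", "srv827689952"
        };
        Map<String, Integer> hostIndexByServer = new LinkedHashMap<>();
        Map<String, Integer> rackIndexByName = new LinkedHashMap<>();
        rackIndexByName.put("rack", 0); // racks are {rack=0}

        for (int i = 0; i < servers.length; i++) {
          hostIndexByServer.put(servers[i], i); // server i is on host i
          System.out.println("server " + i + " is on host " + i);
          System.out.println("server " + i + " is on rack 0");
        }
        System.out.println("Number of tables=1, number of hosts="
            + hostIndexByServer.size() + ", number of racks=" + rackIndexByName.size());
      }
    }
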
2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,584 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,584 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,587 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,587 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,587 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,588 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,588 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,589 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,589 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,590 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,590 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,591 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1586974619=3, srv1820782527=4, srv18940077=5, srv2090623864=6, srv1248380965=1, srv727139671=8, srv827689952=9, srv307793679=7, srv1517239298=2, srv1213815980=0} racks are {rack=0} 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,592 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,593 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,593 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
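The skip messages above name hbase.master.balancer.stochastic.minCostNeedBalance as the threshold that gates balancing (1.0 in this run, so a weighted average imbalance of 0.0 never triggers a plan). A minimal, hedged sketch of lowering that threshold through the standard Hadoop/HBase Configuration API follows; the 0.05 value is illustrative only and is not taken from this log:

    // Hedged sketch: lower the trigger threshold named in the log above so the
    // StochasticLoadBalancer acts on smaller weighted-average imbalances.
    // The 0.05 value is an example, not a value used in this test run.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class LowerMinCostNeedBalance {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }

In a real deployment the same key would normally be set in hbase-site.xml on the master rather than in code.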
2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,594 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,594 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,595 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,595 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
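The functionCost breakdown repeated in these entries lists each cost function with its multiplier (RegionCountSkewCostFunction=500, MoveCostFunction=7, RackLocalityCostFunction=15, TableSkewCostFunction=35, read/write/memstore/storefile=5). The other tuning route the message suggests is raising the multiplier of a specific cost function. A hedged sketch, assuming the usual hbase.master.balancer.stochastic.* multiplier keys; the exact key names can vary by HBase version, so treat them as assumptions to verify against the release in use:

    // Hedged sketch: bias the stochastic balancer toward evening out region
    // counts and per-table skew by raising two multipliers. The key names are
    // assumed from the common hbase.master.balancer.stochastic.* naming and
    // should be checked against the HBase version in use; values are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RaiseCostMultipliers {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f); // multiplier shown as 500.0 in the log
            conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);     // multiplier shown as 35.0 in the log
            System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
        }
    }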
2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,604 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,604 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,604 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,605 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,605 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
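Each block also logs "Slop is less than zero, not checking for sloppiness", i.e. this test disables the BaseLoadBalancer slop check by configuring a negative slop. Slop is normally controlled by hbase.regions.slop; a hedged sketch of restoring a small positive slop follows (the 0.2 value is illustrative, not taken from this run):

    // Hedged sketch: set a positive slop so BaseLoadBalancer performs its
    // sloppiness check instead of skipping it as in the log above.
    // 0.2 is an illustrative value, not one used by this test.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RestoreSlopCheck {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setFloat("hbase.regions.slop", 0.2f);
            System.out.println(conf.get("hbase.regions.slop"));
        }
    }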
2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,606 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,606 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,606 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
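The repeated BalancerClusterState records above ("Hosts are {srvNNN=idx} racks are {rack=0}", then "server N is on host N" and "server N is on rack 0") show the balancer assigning every region server a host index and a rack index before any cost is evaluated; with ten single-server hosts on a single rack, each server gets its own host index and rack 0. Below is a minimal Java sketch of that kind of indexing, using three of the server names from the log and hypothetical class and variable names; it illustrates the mapping only and is not HBase's BalancerClusterState code.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch only: assigns each server a host index and a rack index,
// mirroring the "server N is on host N" / "server N is on rack 0" records above
// for a 10-host, single-rack layout.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of("srv1220733651", "srv1405043091", "srv1476471490");
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();

        for (int i = 0; i < servers.size(); i++) {
            String server = servers.get(i);
            // Every server runs on its own host in this test, so host index == server index.
            if (!hostIndex.containsKey(server)) {
                hostIndex.put(server, hostIndex.size());
            }
            // All hosts sit on the single rack named "rack", so every server maps to rack 0.
            if (!rackIndex.containsKey("rack")) {
                rackIndex.put("rack", rackIndex.size());
            }
            System.out.println("server " + i + " is on host " + hostIndex.get(server)
                + " and rack " + rackIndex.get("rack"));
        }
    }
}
```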
2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,607 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,607 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
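The per-table INFO record ("skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)") describes the gate that decides whether a plan is generated at all: each cost function's imbalance is weighted by its multiplier, and balancing is skipped unless the weighted average exceeds hbase.master.balancer.stochastic.minCostNeedBalance. The Java sketch below reconstructs that check from the numbers printed in the functionCost list; it is an approximation of what the log reports, not the StochasticLoadBalancer implementation itself.

```java
import java.util.Map;

// Approximation of the gate reported in the log: weighted average of the
// per-cost-function imbalances, compared against minCostNeedBalance (1.0 here).
public class NeedsBalanceSketch {
    static boolean needsBalance(Map<String, double[]> functionCost, double minCostNeedBalance) {
        double weightedImbalance = 0.0;
        double totalWeight = 0.0;
        for (double[] multiplierAndImbalance : functionCost.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance = multiplierAndImbalance[1];
            weightedImbalance += multiplier * imbalance;
            totalWeight += multiplier;
        }
        double weightedAverage = totalWeight == 0.0 ? 0.0 : weightedImbalance / totalWeight;
        return weightedAverage > minCostNeedBalance;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances as printed in the functionCost list above.
        Map<String, double[]> functionCost = Map.of(
            "RegionCountSkewCostFunction", new double[] {500.0, 0.0},
            "MoveCostFunction", new double[] {7.0, 0.0},
            "RackLocalityCostFunction", new double[] {15.0, 0.0},
            "TableSkewCostFunction", new double[] {35.0, 0.0},
            "ReadRequestCostFunction", new double[] {5.0, 0.0},
            "WriteRequestCostFunction", new double[] {5.0, 0.0},
            "MemStoreSizeCostFunction", new double[] {5.0, 0.0},
            "StoreFileCostFunction", new double[] {5.0, 0.0});
        System.out.println("needs balance: " + needsBalance(functionCost, 1.0)); // false -> skip
    }
}
```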
2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,608 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,608 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,608 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
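The recurring DEBUG record "Slop is less than zero, not checking for sloppiness" means this test configures a negative slop value, so the simple regions-per-server slop pre-check is skipped and the decision is left entirely to the cost functions. A rough, illustrative sketch of what such a pre-check looks like follows; the formula and names are approximations for explanation, not HBase's code, and the check counts as disabled whenever slop is negative.

```java
import java.util.List;

// Rough sketch of a slop pre-check: a cluster is "sloppy" (needs balancing)
// if any server's region count falls outside average * (1 +/- slop).
// With slop < 0 the check is skipped, matching the log record above.
public class SlopCheckSketch {
    static boolean sloppy(List<Integer> regionsPerServer, double slop) {
        if (slop < 0) {
            return false; // pre-check disabled: "Slop is less than zero, not checking for sloppiness."
        }
        double avg = regionsPerServer.stream().mapToInt(Integer::intValue).average().orElse(0);
        double floor = Math.floor(avg * (1 - slop));
        double ceiling = Math.ceil(avg * (1 + slop));
        return regionsPerServer.stream().anyMatch(c -> c < floor || c > ceiling);
    }

    public static void main(String[] args) {
        System.out.println(sloppy(List.of(10, 10, 10, 15), 0.2));  // true: 15 > ceil(11.25 * 1.2) = 14
        System.out.println(sloppy(List.of(10, 10, 10, 15), -1.0)); // false: check disabled
    }
}
```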
2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,609 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,609 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
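The skip message also names the two tuning knobs directly: lower hbase.master.balancer.stochastic.minCostNeedBalance below its 1.0 value so that smaller weighted-average imbalances trigger a plan, or raise the multiplier of the cost function that matters most. A hedged sketch of setting those properties on an HBase Configuration follows; the minCostNeedBalance key is quoted from the log, while hbase.master.balancer.stochastic.regionCountCost is the commonly used key for the RegionCountSkewCostFunction multiplier and should be verified against the HBase release in use. In production these values normally live in hbase-site.xml on the master rather than being set programmatically.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: make the stochastic balancer more aggressive, per the hint in the log.
public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Trigger balancing for smaller weighted-average imbalances (threshold printed as 1.0 in this test).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed key for the RegionCountSkewCostFunction multiplier (500.0 in the log); verify for your version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```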
2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,610 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,610 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,610 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,611 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,611 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
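Each "Start Generate Balance plan for table: tableNN" record marks one table being evaluated on its own, which is the per-table mode of the balancer. From a client, the whole pass is normally kicked off through the Admin API rather than by driving the balancer class directly; a hedged sketch follows, where Admin#balance() is the standard client entry point and hbase.master.loadbalance.bytable is assumed to be the master-side property that selects per-table balancing (verify for your release).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: ask the master to run the balancer, which on a by-table setup
// evaluates each table ("table35", "table36", ...) the way the records above show.
public class RunBalancerSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Master-side setting, shown here only to name the assumed key for per-table balancing.
        conf.setBoolean("hbase.master.loadbalance.bytable", true);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            boolean ran = admin.balance();
            System.out.println("balancer ran: " + ran);
        }
    }
}
```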
2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,612 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,612 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,612 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,613 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,613 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,614 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,615 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,616 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,616 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,616 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,617 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,617 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,618 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,618 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,618 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
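The repeated "Start Generate Balance plan for table: ..." entries show the balancer being invoked once per table rather than once for the whole cluster. A minimal sketch of how per-table plan generation is normally switched on follows; the property name `hbase.master.loadbalance.bytable` is assumed from stock HBase defaults and should be verified against the version producing this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PerTableBalancing {
  public static void main(String[] args) {
    // Sketch only: per-table balance-plan generation, which yields the
    // "Start Generate Balance plan for table: ..." entries seen in this log.
    // Property name assumed; confirm for the HBase release in use.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.master.loadbalance.bytable", true);
    System.out.println("bytable = "
        + conf.getBoolean("hbase.master.loadbalance.bytable", false));
  }
}
```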
2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,619 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,619 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
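Every skipped table prints the same hint: either lower `hbase.master.balancer.stochastic.minCostNeedBalance` (set to 1.0 in this test) or raise the multipliers of specific cost functions. A hedged sketch of lowering the threshold programmatically is shown below; the property name is taken directly from the log message, while the value 0.05 is purely illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowerBalanceThreshold {
  public static void main(String[] args) {
    // The log shows the balancer skipping because imbalance 0.0 <= threshold 1.0.
    // Lowering minCostNeedBalance makes the StochasticLoadBalancer act on smaller
    // imbalances; 0.05 is an arbitrary example value, not a recommendation.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```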
2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,620 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,620 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,620 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,621 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,621 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
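The functionCost= listing pairs each active cost function with a multiplier and an imbalance; the "weighted average imbalance" compared against the threshold is, as the message describes it, the multiplier-weighted mean of those imbalances, with functions marked "(not needed)" contributing nothing. The small sketch below redoes that arithmetic with the multipliers from this log; it mirrors the log's own description rather than HBase's internal implementation.

```java
public class WeightedImbalance {
  public static void main(String[] args) {
    // Multipliers and imbalances copied from the functionCost= entry above;
    // "(not needed)" functions are omitted because they carry no weight here.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    double weightedAverageImbalance = weightedSum / weightTotal;

    // With every imbalance at 0.0 the result is 0.0, which is <= the 1.0
    // threshold, so the balancer skips each table exactly as logged.
    System.out.println("weighted average imbalance = " + weightedAverageImbalance);
  }
}
```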
2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,622 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,622 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,622 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv204074394=3, srv1220733651=0, srv1476471490=2, srv2053822405=4, srv86630172=9, srv2090251391=6, srv2066978530=5, srv47167627=8, srv27544821=7, srv1405043091=1} racks are {rack=0} 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,623 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,623 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
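The other knob the log message suggests is raising the relative multiplier of a specific cost function so that its imbalance dominates the weighted average. A hedged example is below; the property names `regionCountCost` and `moveCost` are assumed to be the stock StochasticLoadBalancer settings behind the 500.0 and 7.0 multipliers printed above, and should be checked against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RaiseCostMultipliers {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property names for the multipliers logged as
    // RegionCountSkewCostFunction (500.0) and MoveCostFunction (7.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    conf.setFloat("hbase.master.balancer.stochastic.moveCost", 7.0f);
    // A higher relative weight on region-count skew lets smaller skews push the
    // weighted average imbalance past minCostNeedBalance, so plans get generated.
    System.out.println("regionCountCost = "
        + conf.getFloat("hbase.master.balancer.stochastic.regionCountCost", 500.0f));
  }
}
```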
2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,624 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,624 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,624 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
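The "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" messages above report a single weighted decision over the listed cost functions. A minimal, self-contained sketch of that arithmetic is below, assuming the weighted average is the multiplier-weighted mean of the per-function imbalance values printed in the functionCost= line; this is an illustration of the reported decision, not the actual StochasticLoadBalancer code.

```java
// Sketch of the decision reported in the log: compute a multiplier-weighted mean of the
// per-cost-function imbalances and skip balancing when it does not exceed minCostNeedBalance.
public class BalanceDecisionSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost= line in the log;
        // cost functions reported as "(not needed)" are omitted.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0; // threshold printed as threshold(1.0) in the log

        double weightedSum = 0.0, multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            multiplierSum += multipliers[i];
        }
        double weightedAverageImbalance = multiplierSum == 0 ? 0.0 : weightedSum / multiplierSum;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + weightedAverageImbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```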
2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,625 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,625 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,626 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,626 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,626 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,627 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,627 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
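The log message above suggests two tuning knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a specific cost-function multiplier. A hedged configuration sketch follows; the minCostNeedBalance key appears verbatim in the log, while the regionCountCost key below is an assumed property name and should be checked against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;

// Hypothetical tuning sketch for the advice printed in the balancer log message.
public class BalancerTuningSketch {
    public static Configuration moreAggressiveBalancing(Configuration conf) {
        // Lower the threshold so smaller weighted-average imbalances trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Or raise the weight of one cost function (assumed key name for RegionCountSkewCostFunction).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
        return conf;
    }
}
```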
2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,628 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,628 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,628 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,629 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv297963447=8, srv2020811642=4, srv2047749083=5, srv913203982=9, srv1358174130=0, srv1868713423=2, srv2102291291=6, srv2117184865=7, srv1896977116=3, srv1450439521=1} racks are {rack=0} 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,629 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
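The recurring "Slop is less than zero, not checking for sloppiness" lines indicate that a negative slop value disables the per-server region-count bounds check before the cost-based decision runs. The sketch below illustrates that branch under that assumption; the floor/ceiling computation is illustrative, not the exact BaseLoadBalancer code.

```java
// Sketch of a slop-based sloppiness check: a negative slop skips the check entirely,
// otherwise servers outside [floor, ceiling] around the average region count are "sloppy".
public class SlopCheckSketch {
    static boolean sloppyServersExist(double slop, double avgRegionsPerServer, int[] regionsPerServer) {
        if (slop < 0) {
            // Matches the log line: slop is less than zero, so do not check for sloppiness.
            return false;
        }
        double floor = Math.floor(avgRegionsPerServer * (1 - slop));
        double ceiling = Math.ceil(avgRegionsPerServer * (1 + slop));
        for (int regions : regionsPerServer) {
            if (regions < floor || regions > ceiling) {
                return true;
            }
        }
        return false;
    }
}
```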
2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,630 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,630 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,630 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,631 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
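[Editor's note] The log entries above repeatedly state that balancing is skipped because the weighted average imbalance is below hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0), and they suggest either lowering that threshold or raising a cost multiplier. The following is a minimal, hypothetical Java sketch of how such a Hadoop Configuration could be prepared; the minCostNeedBalance key is taken verbatim from the log line, while the class name, the chosen values, and the regionCountCost key are illustrative assumptions, not the test's actual setup.

    import org.apache.hadoop.conf.Configuration;

    public class BalancerTuningSketch {
        // Hypothetical illustration only: build a Configuration that would make the
        // StochasticLoadBalancer act on smaller imbalances than the 1.0 default seen above.
        public static Configuration moreAggressiveBalancing() {
            Configuration conf = new Configuration();
            // Lower the skip threshold mentioned in the log message (default 1.0).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Alternatively, raise a specific cost multiplier; this key is assumed to map to
            // RegionCountSkewCostFunction, whose multiplier is logged as 500.0 above.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
            return conf;
        }
    }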
2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,632 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,633 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv123192624=1, srv1398753250=2, srv1832100338=6, srv878007275=9, srv1429056242=3, srv1100510273=0, srv530192013=8, srv176953505=5, srv1571223488=4, srv1834026747=7} racks are {rack=0} 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,634 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,635 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,636 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,636 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
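[Editor's note] A small, self-contained sketch of the arithmetic implied by the "weighted average imbalance=0.0 <= threshold(1.0)" lines: assuming the reported figure is the multiplier-weighted mean of the per-function imbalances listed in functionCost (cost functions marked "not needed" omitted), every imbalance of 0.0 yields a weighted average of 0.0, so the skip branch is taken. The helper and its names are hypothetical, not the HBase implementation.

    public final class WeightedImbalanceSketch {
        // Multiplier-weighted mean of per-cost-function imbalances (assumed interpretation).
        public static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
            double weighted = 0.0;
            double totalWeight = 0.0;
            for (int k = 0; k < multipliers.length; k++) {
                weighted += multipliers[k] * imbalances[k];
                totalWeight += multipliers[k];
            }
            return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
        }

        public static void main(String[] args) {
            // Values copied from the functionCost list logged above.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
            System.out.println(weightedAverageImbalance(multipliers, imbalances)); // prints 0.0, below the 1.0 threshold
        }
    }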
2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,637 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,637 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,637 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv407294825=6, srv45603910=8, srv2052750157=4, srv198916596=3, srv129898649=1, srv1160436877=0, srv330583036=5, srv74695875=9, srv1679540514=2, srv412068685=7} racks are {rack=0} 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,638 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,638 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,639 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,639 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,640 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,640 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,640 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,641 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,641 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,642 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,642 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2094300132=6, srv573206040=8, srv1248475255=0, srv1929496269=5, srv1300343321=2, srv1830594617=4, srv318433337=7, srv972302135=9, srv1265453147=1, srv1372562891=3} racks are {rack=0} 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,642 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,643 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,643 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,643 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,644 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,644 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,645 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,645 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:15,645 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,646 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,646 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,647 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,648 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,649 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,649 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,650 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,650 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,650 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,651 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,651 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,652 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,652 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,652 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,653 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,653 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,654 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,654 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,654 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,655 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:15,655 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,656 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,656 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,656 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,657 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,657 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
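The skip messages above all point at the same knob: hbase.master.balancer.stochastic.minCostNeedBalance. Balancing for a table is skipped whenever the weighted average imbalance does not exceed that threshold (1.0 in this run), and the log suggests either lowering it or raising individual cost-function multipliers. As a minimal sketch of how that threshold could be lowered programmatically, assuming a standard HBase client Configuration is in reach (the class name and the 0.05 value below are illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MinCostNeedBalanceSketch {
        public static void main(String[] args) {
            // Start from the usual hbase-default.xml / hbase-site.xml settings.
            Configuration conf = HBaseConfiguration.create();

            // Lower the threshold quoted in the log (1.0 in this run) so the
            // StochasticLoadBalancer acts on smaller weighted-average imbalances.
            // 0.05f is only an example value, not a recommendation.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

The same property can equally be set in hbase-site.xml on the master; the log's alternative suggestion, increasing the relative multipliers of specific cost functions, instead changes the weighting of the functionCost terms listed in each message.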
2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,658 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,658 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,658 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,659 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,659 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
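The "weighted average imbalance=0.0 <= threshold(1.0)" lines above report one number derived from the functionCost listing. Below is a hedged sketch of the arithmetic that message implies, using the multipliers and imbalances copied from the log; the aggregation (a weighted mean over the cost functions that report a value) is an assumption for illustration, not code taken from the HBase balancer itself.

```java
// Hedged sketch of the arithmetic implied by the repeated
// "weighted average imbalance=0.0 <= threshold(1.0)" log line.
// Multipliers and imbalances are copied from the functionCost listing above;
// the weighted-mean aggregation is an assumption, not the HBase source.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // {multiplier, imbalance} pairs from the log; cost functions marked
        // "(not needed)" report no value and are skipped here.
        double[][] functionCost = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {  7.0, 0.0}, // MoveCostFunction
            { 15.0, 0.0}, // RackLocalityCostFunction
            { 35.0, 0.0}, // TableSkewCostFunction
            {  5.0, 0.0}, // ReadRequestCostFunction
            {  5.0, 0.0}, // WriteRequestCostFunction
            {  5.0, 0.0}, // MemStoreSizeCostFunction
            {  5.0, 0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] fc : functionCost) {
            weightedSum += fc[0] * fc[1];
            multiplierSum += fc[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum;
        double minCostNeedBalance = 1.0; // the threshold(1.0) reported in the log
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): %b%n",
            weightedAverageImbalance, minCostNeedBalance,
            weightedAverageImbalance <= minCostNeedBalance);
        // With every imbalance at 0.0 the average is 0.0, so the balancer logs
        // "skipping load balancing" and generates no plan for the table.
    }
}
```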
2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,660 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,660 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:15,660 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,661 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,661 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,662 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,662 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,662 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,663 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,664 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,664 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,665 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,665 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
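(Editor's aside, placed between two log records: every "skipping load balancing" INFO message in this trace reports a weighted average imbalance derived from the multiplier/imbalance pairs it lists. The sketch below is a minimal, standalone illustration of that arithmetic using the exact values printed in these messages; it is not the actual StochasticLoadBalancer code, and the class and variable names are invented for the example.)

```java
// Illustrative only: recomputes the "weighted average imbalance" figure that the
// INFO messages in this log report, from the multiplier/imbalance pairs they print.
public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // multiplier/imbalance pairs copied from the functionCost=... log message;
        // cost functions reported as "(not needed)" are skipped, as in the message.
        double[][] functions = {
            {500.0, 0.0},  // RegionCountSkewCostFunction
            {7.0,   0.0},  // MoveCostFunction
            {15.0,  0.0},  // RackLocalityCostFunction
            {35.0,  0.0},  // TableSkewCostFunction
            {5.0,   0.0},  // ReadRequestCostFunction
            {5.0,   0.0},  // WriteRequestCostFunction
            {5.0,   0.0},  // MemStoreSizeCostFunction
            {5.0,   0.0},  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double totalMultiplier = 0.0;
        for (double[] f : functions) {
            weightedSum += f[0] * f[1];
            totalMultiplier += f[0];
        }
        // Multiplier-weighted average of the per-function imbalances.
        double weightedAverageImbalance = weightedSum / totalMultiplier;
        double threshold = 1.0; // "threshold(1.0)" in the log, i.e. minCostNeedBalance
        System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): %b%n",
            weightedAverageImbalance, threshold, weightedAverageImbalance <= threshold);
        // With every listed imbalance at 0.0 this prints "0.0 <= 1.0: true", which is
        // why the balancer logs "skipping load balancing" for each table in this test.
    }
}
```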
2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,666 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,666 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,667 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,667 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,667 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
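(Editor's aside, placed between two log records: the INFO messages in this trace name two tuning knobs, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. The sketch below shows how both might be set programmatically; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumed name for the RegionCountSkewCostFunction multiplier and should be verified against the HBase version in use. In practice these settings usually live in hbase-site.xml.)

```java
// Illustrative sketch of the two knobs suggested by the "skipping load balancing"
// INFO messages. Not a recommendation of specific values; keys other than
// minCostNeedBalance are assumptions, not taken from this log.
import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
    public static Configuration tuned() {
        Configuration conf = new Configuration();
        // Knob 1 (key verbatim from the log): lower the threshold so that smaller
        // weighted average imbalances still trigger a balance run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Knob 2 (assumed key): raise the relative weight of region-count skew so it
        // dominates the weighted average imbalance.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
    }
}
```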
2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,668 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,668 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,669 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,669 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,669 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
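(Editor's aside, placed between two log records: the repeated BalancerClusterState DEBUG/INFO lines describe a test topology in which each of the ten srvNNN servers is its own host and all hosts sit on the single rack "rack". The sketch below mirrors that indexing with invented names; it is not HBase's BalancerClusterState, just a compact restatement of what the trace enumerates.)

```java
// Illustrative only: reproduces the server -> host -> rack indexing that the
// BalancerClusterState lines in this log walk through, using the server names and
// index order printed in the "Hosts are {...}" message.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {"srv1683122471", "srv1858697067", "srv1948708978",
            "srv1975496611", "srv232594532", "srv41008774", "srv456388983",
            "srv539028086", "srv846838188", "srv854206283"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];
        for (int i = 0; i < servers.length; i++) {
            // Each server name is its own host in this test, so host index equals i.
            serverToHost[i] = hostIndex.computeIfAbsent(servers[i], h -> hostIndex.size());
            serverToRack[i] = 0; // only one rack ("rack") exists in the test topology
            System.out.printf("server %d is on host %d, rack %d%n",
                i, serverToHost[i], serverToRack[i]);
        }
        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=1%n",
            hostIndex.size());
    }
}
```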
2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,670 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,670 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,671 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,671 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,671 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
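[editor's note] Every table in this run is skipped for the same reason: the weighted average imbalance across the enabled cost functions is 0.0, which does not exceed the minCostNeedBalance threshold of 1.0 quoted in the message. A minimal sketch of that skip decision follows, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) over the functions printed in functionCost=...; this is an illustration of the condition the log reports, not the actual org.apache.hadoop.hbase.master.balancer implementation.

    // Illustrative sketch of the skip condition reported above (assumed formula).
    public class BalanceSkipCheck {
      static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0, multiplierSum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weightedSum += multipliers[i] * imbalances[i];  // multiplier * imbalance, per cost function
          multiplierSum += multipliers[i];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
      }

      public static void main(String[] args) {
        // Multipliers and imbalances as printed for the tables above.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};
        double minCostNeedBalance = 1.0;  // hbase.master.balancer.stochastic.minCostNeedBalance
        double imbalance = weightedAverageImbalance(multipliers, imbalances);
        if (imbalance <= minCostNeedBalance) {
          System.out.println("skipping load balancing because weighted average imbalance="
              + imbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
      }
    }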
2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,672 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:15,672 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,673 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,673 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,673 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
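[editor's note] The message above suggests two tuning knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise a cost-function multiplier. A hedged example of setting both through the Hadoop Configuration API is shown below; in practice these would live in hbase-site.xml on the master. The minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumption for the RegionCountSkewCostFunction multiplier and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the skip threshold (1.0 in this log) so smaller imbalances still
        // trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed property name for the RegionCountSkewCostFunction multiplier
        // (500.0 in this log); check the exact key for your HBase release.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        return conf;
      }
    }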
2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,674 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,674 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,675 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,675 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,675 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,676 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,676 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
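[editor's note] Before each per-table plan the balancer rebuilds the cluster topology, which is why the same "Hosts are {...}" map and the "server N is on host N" / "server N is on rack 0" lines repeat for every table: ten servers, one server per host, all on a single rack. A small illustrative sketch of that indexing under the layout shown in this log (names and structure are illustrative, not the BalancerClusterState code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ClusterIndexSketch {
      public static void main(String[] args) {
        // Servers listed in the index order printed in "Hosts are {...}" above.
        String[] servers = {"srv1683122471", "srv1858697067", "srv1948708978", "srv1975496611",
            "srv232594532", "srv41008774", "srv456388983", "srv539028086",
            "srv846838188", "srv854206283"};
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int rackIndex = 0;  // single rack, as in "racks are {rack=0}"
        for (int server = 0; server < servers.length; server++) {
          hostIndex.put(servers[server], server);  // one server per host, so host index == server index
          System.out.println("server " + server + " is on host " + server
              + " and rack " + rackIndex);
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
      }
    }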
2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,677 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,677 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,677 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,678 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,678 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,679 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,679 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,679 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,680 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,680 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
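The "skipping load balancing" entries around this point all report the same decision: the weighted average imbalance across the listed cost functions (0.0 here) is compared against the minCostNeedBalance threshold (1.0), and balancing is skipped when it does not exceed that threshold. Below is a minimal, self-contained Java sketch of that comparison, using the multipliers and imbalances printed in the functionCost= entries; it illustrates the reported arithmetic only and is not the actual StochasticLoadBalancer implementation (class and variable names are invented for the example).

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceSkipSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances as printed in the functionCost= entries above.
        Map<String, double[]> functionCost = new LinkedHashMap<>();
        functionCost.put("RegionCountSkewCostFunction", new double[]{500.0, 0.0});
        functionCost.put("MoveCostFunction",            new double[]{7.0,   0.0});
        functionCost.put("RackLocalityCostFunction",    new double[]{15.0,  0.0});
        functionCost.put("TableSkewCostFunction",       new double[]{35.0,  0.0});
        functionCost.put("ReadRequestCostFunction",     new double[]{5.0,   0.0});
        functionCost.put("WriteRequestCostFunction",    new double[]{5.0,   0.0});
        functionCost.put("MemStoreSizeCostFunction",    new double[]{5.0,   0.0});
        functionCost.put("StoreFileCostFunction",       new double[]{5.0,   0.0});

        double minCostNeedBalance = 1.0; // the "threshold(1.0)" in the log entries

        // Weighted average imbalance = sum(multiplier * imbalance) / sum(multiplier).
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] cost : functionCost.values()) {
            weightedSum += cost[0] * cost[1];
            multiplierSum += cost[0];
        }
        double weightedAverageImbalance = multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;

        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing because weighted average imbalance=%.1f"
                    + " <= threshold(%.1f)%n", weightedAverageImbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0, the weighted average is 0.0, which is why each table in this run is skipped.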
2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,681 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,681 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,681 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,682 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,682 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
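The same entries also spell out the tuning options: lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0, or raise the multiplier of a specific cost function. A minimal sketch of doing that programmatically follows; the minCostNeedBalance key is quoted verbatim in the log, while the regionCountCost key is an assumption inferred from the RegionCountSkewCostFunction multiplier of 500.0 shown above. In a real deployment these settings would normally live in hbase-site.xml rather than in code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the threshold so smaller weighted imbalances still trigger a balance plan
        // (key quoted verbatim in the log entries above).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Or weight region-count skew more heavily (assumed key, default multiplier 500.0
        // per the functionCost= entries above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```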
2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,683 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,683 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,683 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,684 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,684 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
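The per-table skip decisions in this run follow directly from the functionCost breakdown. Assuming the balancer scores the cluster as a multiplier-weighted average of the per-function imbalances, weightedAverage = sum(multiplier_i * imbalance_i) / sum(multiplier_i), the values reported here give a numerator of 500.0*0.0 + 7.0*0.0 + 15.0*0.0 + 35.0*0.0 + 5.0*0.0 + 5.0*0.0 + 5.0*0.0 + 5.0*0.0 = 0.0, so the weighted average imbalance is 0.0. Since 0.0 <= threshold(1.0), no balance plan is generated for the table, which is exactly what the StochasticLoadBalancer(421) lines report for each table.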
2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,685 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,685 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,685 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,686 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,686 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,687 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,687 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:15,687 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,688 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,688 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
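The skip message names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold that gates balancing. Below is a minimal sketch of how that setting could be lowered (or a cost-function weight raised) from client/test code, assuming a standard HBaseConfiguration on the classpath; in a deployed cluster these properties would normally be set in the master's hbase-site.xml, and the regionCountCost key is an assumption to be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Load hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();

    // Lower the gate so smaller weighted-average imbalances still trigger balancing
    // (the log above shows this test running with threshold(1.0)).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Or give a specific cost function more relative weight; this key is assumed to
    // correspond to RegionCountSkewCostFunction (multiplier=500.0 in the log above).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // Read back the effective values.
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    System.out.println("regionCountCost = "
        + conf.getFloat("hbase.master.balancer.stochastic.regionCountCost", 500f));
  }
}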
2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,689 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,689 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,690 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,690 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,690 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
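Note on the repeated skip decisions above: each one hinges on the same check, the multiplier-weighted average of the per-function imbalances compared against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in these runs). The sketch below reproduces that arithmetic for the functionCost values logged for table33. It is only an illustrative approximation under that assumption, not the actual StochasticLoadBalancer code; the class and method names are invented for the example.

// Illustrative sketch only -- not the HBase implementation.
// Assumes "weighted average imbalance" is the multiplier-weighted mean of
// each cost function's imbalance, compared against minCostNeedBalance.
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

  static double weightedAverageImbalance(Map<String, double[]> functions) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] f : functions.values()) {
      double multiplier = f[0];
      double imbalance = f[1];
      weightedSum += multiplier * imbalance;
      totalWeight += multiplier;
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers and imbalances copied from the functionCost line above
    // (functions logged as "not needed" are omitted).
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
    functions.put("MoveCostFunction", new double[] { 7.0, 0.0 });
    functions.put("RackLocalityCostFunction", new double[] { 15.0, 0.0 });
    functions.put("TableSkewCostFunction", new double[] { 35.0, 0.0 });
    functions.put("ReadRequestCostFunction", new double[] { 5.0, 0.0 });
    functions.put("WriteRequestCostFunction", new double[] { 5.0, 0.0 });
    functions.put("MemStoreSizeCostFunction", new double[] { 5.0, 0.0 });
    functions.put("StoreFileCostFunction", new double[] { 5.0, 0.0 });

    double minCostNeedBalance = 1.0; // threshold shown in the log lines above
    double imbalance = weightedAverageImbalance(functions);
    if (imbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: " + imbalance
          + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("generating balance plan");
    }
  }
}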
2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,691 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,691 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,692 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,692 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,692 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
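Note on the BalancerClusterState lines repeated for every table: they record the index assignment the balancer builds before costing. Each distinct server name gets a host index, and because this test declares a single rack ({rack=0}), every server lands on rack 0. A minimal sketch of that bookkeeping follows; ClusterStateSketch is a made-up name and the server names are copied from the "Hosts are {...}" mapping logged above.

// Illustrative sketch of the host/rack indexing reported above.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterStateSketch {
  public static void main(String[] args) {
    // One server per host in this test, in the order of the logged indexes.
    List<String> servers = List.of("srv1683122471", "srv1858697067",
        "srv1948708978", "srv1975496611", "srv232594532", "srv41008774",
        "srv456388983", "srv539028086", "srv846838188", "srv854206283");

    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();

    for (int server = 0; server < servers.size(); server++) {
      String hostName = servers.get(server);
      String rackName = "rack"; // the test declares only one rack
      if (!hostIndex.containsKey(hostName)) {
        hostIndex.put(hostName, hostIndex.size());
      }
      if (!rackIndex.containsKey(rackName)) {
        rackIndex.put(rackName, rackIndex.size());
      }
      System.out.println("server " + server + " is on host " + hostIndex.get(hostName));
      System.out.println("server " + server + " is on rack " + rackIndex.get(rackName));
    }
    System.out.println("Number of hosts=" + hostIndex.size()
        + ", number of racks=" + rackIndex.size());
  }
}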
2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,693 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,693 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,694 INFO 
[Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on 
rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,694 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,694 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv539028086=7, srv1683122471=0, srv41008774=5, srv1975496611=3, srv232594532=4, srv456388983=6, srv1858697067=1, srv846838188=8, srv854206283=9, srv1948708978=2} racks are {rack=0} 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,695 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,695 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,696 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,696 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
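Note on the tuning advice in each skip message: the two routes it names are lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function. Assuming the property is set programmatically through the standard HBase Configuration API (setting it in hbase-site.xml works the same way), a minimal sketch looks like this; the 0.05 value is only an illustrative choice, not a recommendation from the log.

// Minimal sketch: lowering minCostNeedBalance so the balancer acts on
// smaller weighted imbalances than the threshold(1.0) used in these runs.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    // The other route the log mentions -- raising a specific cost function's
    // multiplier -- is configured the same way under its own
    // hbase.master.balancer.stochastic.* key (key names not shown in the log).
  }
}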
2024-11-07T18:27:15,696 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,697 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,697 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,697 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,698 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,698 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
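[Editor's note] The repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" entries above point at the tuning knob they name, hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below shows one way that advice could be applied programmatically. It is illustrative only: the 0.05 value is an arbitrary example, and the regionCountCost key is an assumption about the multiplier property rather than something confirmed by this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Minimal sketch, assuming the property named in the log message above.
// Lowering minCostNeedBalance below a table's weighted average imbalance
// stops the StochasticLoadBalancer from skipping that table.
public class BalancerTuningSketch {
  public static Configuration aggressiveBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Default is 1.0 per the log line; 0.05 is only an illustrative choice.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Alternative route the log suggests: raise a specific cost function's
    // multiplier (key name is an assumption, not taken from this log).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}
```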
2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,699 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,699 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,699 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,700 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,700 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
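[Editor's note] The functionCost lists logged for each table pair every cost function with a multiplier and an imbalance. The snippet below is a rough reconstruction, not the HBase source, of how such a list can be read as the "weighted average imbalance" the balancer compares against the threshold: weight each imbalance by its multiplier, divide by the sum of multipliers. The formula is an assumption made for illustration; the input values are copied from the log entries above.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of a weighted-average-imbalance calculation over the logged
// (multiplier, imbalance) pairs. With every imbalance at 0.0 the result is
// 0.0 <= 1.0, which is why each table is skipped.
public class WeightedImbalanceSketch {
  public static double weightedAverageImbalance(Map<String, double[]> functions) {
    double weighted = 0.0;
    double totalMultiplier = 0.0;
    for (double[] pair : functions.values()) {
      double multiplier = pair[0];
      double imbalance = pair[1];
      weighted += multiplier * imbalance;
      totalMultiplier += multiplier;
    }
    return totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Values taken from the functionCost list logged for these tables.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    costs.put("MoveCostFunction", new double[] {7.0, 0.0});
    costs.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
    costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
    costs.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
    costs.put("WriteRequestCostFunction", new double[] {5.0, 0.0});
    costs.put("MemStoreSizeCostFunction", new double[] {5.0, 0.0});
    costs.put("StoreFileCostFunction", new double[] {5.0, 0.0});
    System.out.println("weighted average imbalance = "
        + weightedAverageImbalance(costs));
  }
}
```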
2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,701 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,701 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,702 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,702 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,702 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
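[Editor's note] Each plan above begins with BalancerClusterState logging "server N is on host N" and "server N is on rack 0": ten single-server hosts collapsed onto one rack. The sketch below is a simplified stand-in for that indexing step, not HBase's BalancerClusterState; the assumption that each host name equals its server name and that there is a single rack named "rack" comes from the logged host map and rack map.

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustration of server -> host -> rack index assignment as logged above:
// every distinct host gets the next host index, every distinct rack the next
// rack index, so ten one-server hosts on one rack yield hosts 0..9, rack 0.
public class ClusterIndexingSketch {
  public static void main(String[] args) {
    List<String> servers = List.of(
        "srv1164093339", "srv1357203639", "srv138316344", "srv1478000985",
        "srv1757459025", "srv1935322292", "srv2146165754", "srv668473231",
        "srv777122632", "srv977891931");
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    Map<String, Integer> rackIndex = new LinkedHashMap<>();
    List<int[]> serverToHostAndRack = new ArrayList<>();
    for (String server : servers) {
      // Assumption for the sketch: host name == server name, single rack "rack".
      int host = hostIndex.computeIfAbsent(server, h -> hostIndex.size());
      int rack = rackIndex.computeIfAbsent("rack", r -> rackIndex.size());
      serverToHostAndRack.add(new int[] {host, rack});
    }
    for (int i = 0; i < servers.size(); i++) {
      int[] hr = serverToHostAndRack.get(i);
      System.out.println("server " + i + " is on host " + hr[0] + ", rack " + hr[1]);
    }
  }
}
```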
2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,703 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,703 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,704 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,704 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,704 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,705 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,705 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
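[Editor's note] The skip decision repeated above hinges on the quoted "weighted average imbalance". The log does not show the formula, but a natural reading of the functionCost= listing is a multiplier-weighted mean of the per-function imbalances, with the functions reported as "(not needed)" left out. The sketch below reproduces that arithmetic for the values printed here; the class name and the weighting rule are assumptions for illustration, not taken from the HBase source.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical re-computation of the "weighted average imbalance" quoted in the
// StochasticLoadBalancer log line, assuming it is the multiplier-weighted mean of
// the per-cost-function imbalances (functions printed as "(not needed)" are skipped).
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs exactly as printed in the functionCost= listing above
    Map<String, double[]> functionCost = new LinkedHashMap<>();
    functionCost.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functionCost.put("MoveCostFunction",            new double[] {7.0,   0.0});
    functionCost.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    functionCost.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    functionCost.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    functionCost.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    functionCost.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] mi : functionCost.values()) {
      weightedSum += mi[0] * mi[1]; // multiplier * imbalance
      totalWeight += mi[0];
    }
    double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;

    double threshold = 1.0; // minCostNeedBalance as quoted in the log
    System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f) -> skip balancing%n",
        weightedAverageImbalance, threshold);
  }
}
```

With every per-function imbalance at 0.0, the weighted mean is 0.0 regardless of the multipliers, which is why every table in this run is skipped.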
2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,706 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,706 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,706 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,707 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
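[Editor's note] The same skip message also names the two knobs for making the balancer act sooner: lower hbase.master.balancer.stochastic.minCostNeedBalance (printed here as threshold(1.0)) or raise the multiplier of a specific cost function. A minimal sketch of setting both on a Configuration follows; the minCostNeedBalance key is taken verbatim from the log line, while the multiplier key hbase.master.balancer.stochastic.regionCountCost is an assumption (it is not printed in this log), so verify it against the cost-function source for your HBase release. In a real deployment these values would normally live in hbase-site.xml on the master rather than be set programmatically.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the two tuning directions named in the log message above.
public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // 1) Lower the skip threshold (quoted as threshold(1.0) in this test run)
    //    so smaller imbalances still trigger a balancing round.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // 2) Or weight one cost function more heavily. The key below is an assumed
    //    name for the RegionCountSkewCostFunction multiplier (500.0 above);
    //    confirm the exact property name before relying on it.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    System.out.println("minCostNeedBalance="
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}
```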
2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,708 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,708 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,709 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,709 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,710 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,710 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,711 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
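[annotation] The repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" records above compare a multiplier-weighted mean of the per-cost-function imbalances listed under functionCost= against the minCostNeedBalance threshold. The following is a minimal illustrative sketch of that arithmetic only, using the multipliers and imbalances copied from the dump; it is not the HBase implementation, and the class and variable names are hypothetical:

    // Illustrative sketch: why "weighted average imbalance=0.0 <= threshold(1.0)" skips the plan.
    // Names are hypothetical; only the numbers are taken from the functionCost= dump above.
    public final class WeightedImbalanceSketch {
      public static void main(String[] args) {
        // (multiplier, imbalance) pairs from the log; "(not needed)" cost functions are omitted.
        double[][] costs = {
          {500.0, 0.0}, // RegionCountSkewCostFunction
          {7.0,   0.0}, // MoveCostFunction
          {15.0,  0.0}, // RackLocalityCostFunction
          {35.0,  0.0}, // TableSkewCostFunction
          {5.0,   0.0}, // ReadRequestCostFunction
          {5.0,   0.0}, // WriteRequestCostFunction
          {5.0,   0.0}, // MemStoreSizeCostFunction
          {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0, multiplierSum = 0.0;
        for (double[] c : costs) {
          weightedSum += c[0] * c[1];   // multiplier * imbalance
          multiplierSum += c[0];
        }
        double weightedAverageImbalance = weightedSum / multiplierSum; // 0.0 with all imbalances at 0.0
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        if (weightedAverageImbalance <= minCostNeedBalance) {
          System.out.println("skipping load balancing: " + weightedAverageImbalance
              + " <= threshold(" + minCostNeedBalance + ")");
        }
      }
    }

With every imbalance at 0.0 the weighted average is necessarily 0.0, so each per-table plan in this run is skipped regardless of the multiplier values.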
2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,712 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,713 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,713 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,714 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,714 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,715 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,715 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,716 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
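The BalancerClusterState lines above simply enumerate how each server is indexed onto a host and a rack before costing starts: ten servers, one per host, all on the single rack "rack". A minimal, purely illustrative sketch of that bookkeeping (hypothetical class and variable names, not the HBase implementation) is:

```java
// Illustrative only: reproduces what the BalancerClusterState(202/303/314) lines report,
// using the server-name -> index mapping printed in the "Hosts are {...}" entry.
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        String[] servers = {
            "srv1164093339", "srv1357203639", "srv138316344", "srv1478000985",
            "srv1757459025", "srv1935322292", "srv2146165754", "srv668473231",
            "srv777122632", "srv977891931"
        };
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        int[] serverToHost = new int[servers.length];
        int[] serverToRack = new int[servers.length];
        for (int i = 0; i < servers.length; i++) {
            // every distinct server name gets its own host index here
            Integer h = hostIndex.get(servers[i]);
            if (h == null) {
                h = hostIndex.size();
                hostIndex.put(servers[i], h);
            }
            serverToHost[i] = h;
            // a single rack means every server lands on rack 0
            serverToRack[i] = 0;
            System.out.println("server " + i + " is on host " + serverToHost[i]
                + ", rack " + serverToRack[i]);
        }
        System.out.println("Number of hosts=" + hostIndex.size() + ", number of racks=1");
    }
}
```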
2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,717 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,717 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,718 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table141 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,718 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,718 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table141) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table142 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table142) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table143 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table143) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,719 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table144 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,719 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
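The skip decision logged by StochasticLoadBalancer(421) compares a weighted average imbalance against the threshold(1.0). A minimal sketch, assuming that average is the multiplier-weighted mean of the per-function imbalances listed under functionCost (the method name and the aggregation itself are assumptions for illustration, not code from HBase):

```java
// Sketch of the threshold check implied by the log line:
// "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)".
// Multipliers and imbalances are copied from the functionCost listing above;
// the "(not needed)" cost functions are simply omitted.
public class WeightedImbalanceSketch {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0, total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // RegionCountSkew, Move, RackLocality, TableSkew,
        // ReadRequest, WriteRequest, MemStoreSize, StoreFile
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
        double avg = weightedAverageImbalance(multipliers, imbalances);
        double minCostNeedBalance = 1.0; // threshold(1.0) from the log
        System.out.println("weighted average imbalance=" + avg
            + ", balancing needed=" + (avg > minCostNeedBalance));
    }
}
```

With every imbalance at 0.0 the weighted average is 0.0, which explains why every table in this run is skipped regardless of the multipliers.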
2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table144) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table145 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,720 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table145) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table146 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,720 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,720 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table146) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table147 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table147) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table148 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table148) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,721 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table149 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,721 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
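The same log message names the knobs for more aggressive balancing: lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. A hedged sketch of applying those settings programmatically; the minCostNeedBalance key is taken verbatim from the log, while the regionCountCost key is an assumption inferred from the 500.0 multiplier shown above and should be verified against the running HBase version:

```java
// Sketch of the tuning suggested by the StochasticLoadBalancer(421) message.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // react to smaller imbalances than the threshold(1.0) used in this test run
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // assumed key: weight region-count skew even more heavily than the 500.0 default
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance="
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

The same keys can equally be set in hbase-site.xml; the programmatic form is shown only to keep the sketch self-contained.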
2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table149) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,722 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 
is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,722 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,722 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,723 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,723 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,724 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,724 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,724 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,725 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,725 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,726 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,726 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,726 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,727 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,727 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,728 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,728 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,728 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table150 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table150) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,729 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,729 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
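[Editor's note] The repeated "skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)" records above follow from the balancer combining the per-cost-function imbalances with their multipliers and comparing the result to hbase.master.balancer.stochastic.minCostNeedBalance. The sketch below is only an illustration of that weighted-average idea using the multipliers printed in the functionCost=... line; it is not HBase's actual implementation, and the class and method names are made up for the example.

    // Illustrative sketch only (assumed arithmetic, not HBase source code):
    // a multiplier-weighted average of per-cost-function imbalances.
    // With every imbalance at 0.0, the result is 0.0 <= minCostNeedBalance (1.0),
    // so plan generation is skipped for the table, as logged above.
    final class WeightedImbalanceSketch {
        static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
            double weighted = 0.0;
            double total = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weighted += multipliers[i] * imbalances[i];
                total += multipliers[i];
            }
            return total == 0.0 ? 0.0 : weighted / total;
        }

        public static void main(String[] args) {
            // Multipliers copied from the functionCost=... record above; all imbalances are 0.0 there.
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
            double avg = weightedAverageImbalance(multipliers, imbalances);
            double minCostNeedBalance = 1.0; // the threshold(1.0) named in the log message
            System.out.println("skip balancing? " + (avg <= minCostNeedBalance)); // prints: skip balancing? true
        }
    }
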
2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table151 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,730 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table151) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,730 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,730 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,731 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,731 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,732 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,732 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-07T18:27:15,732 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,733 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,733 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
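[Editor's note] The log message also suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising individual cost-function multipliers to make balancing more aggressive. Below is a minimal, hedged sketch of how such properties could be set through the standard Hadoop/HBase Configuration API. Only the minCostNeedBalance key appears verbatim in the log; the regionCountCost key is the commonly documented multiplier for RegionCountSkewCostFunction and should be verified against your HBase version. In a real deployment these values would normally live in hbase-site.xml on the master rather than be set programmatically.

    // Configuration sketch, assuming the standard HBase client API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Lower the skip threshold so smaller imbalances still trigger a balance plan
            // (the test log shows the threshold at 1.0, which skips everything here).
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
            // Assumed key: weight region-count skew even more heavily than the default
            // multiplier of 500.0 reported in the functionCost=... records above.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
            System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        }
    }
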
2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,734 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,734 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,734 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,735 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,735 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,736 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,736 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:15,736 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,737 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:15,737 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,738 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,738 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,739 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table140 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table140) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,739 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,739 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,740 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,740 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,740 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,741 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,741 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,742 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,742 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,742 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,743 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table152 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,743 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table152) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table153 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,744 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,744 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,744 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table153) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table154 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table154) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,745 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,745 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,746 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,746 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,747 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,747 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,747 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
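
Editor's note: the repeated balancer.StochasticLoadBalancer(421) entries above all record the same decision: the weighted average imbalance (0.0) is not above the minCostNeedBalance threshold (1.0), so no balance plan is generated for that table. Below is a minimal sketch of that check, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) over the cost functions listed in functionCost= — the real StochasticLoadBalancer internals may differ, and the class and method names here are illustrative only, not HBase API.

// Illustrative sketch: approximates the "weighted average imbalance <= threshold"
// decision reported by balancer.StochasticLoadBalancer(421) in the log above.
public class NeedsBalanceSketch {
    // One entry per cost function from the functionCost= list, e.g.
    // RegionCountSkewCostFunction: multiplier=500.0, imbalance=0.0
    record CostFunction(String name, double multiplier, double imbalance) {}

    static boolean needsBalance(java.util.List<CostFunction> costs, double minCostNeedBalance) {
        double weighted = 0.0, totalMultiplier = 0.0;
        for (CostFunction c : costs) {
            weighted += c.multiplier() * c.imbalance();
            totalMultiplier += c.multiplier();
        }
        double weightedAverageImbalance = totalMultiplier == 0.0 ? 0.0 : weighted / totalMultiplier;
        // Matches the log: balancing is skipped when the weighted average is <= threshold(1.0).
        return weightedAverageImbalance > minCostNeedBalance;
    }

    public static void main(String[] args) {
        var costs = java.util.List.of(
            new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostFunction("MoveCostFunction", 7.0, 0.0),
            new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
            new CostFunction("TableSkewCostFunction", 35.0, 0.0));
        // With every imbalance at 0.0 the weighted average is 0.0 <= 1.0, so balancing is skipped.
        System.out.println(needsBalance(costs, 1.0)); // false
    }
}
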
2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,748 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,748 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,748 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,749 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,749 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
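
Editor's note: the log's own suggestion for more aggressive balancing is to lower hbase.master.balancer.stochastic.minCostNeedBalance from the 1.0 used in this run, or to raise the multiplier of a specific cost function. These settings normally live in hbase-site.xml on the HMaster; the snippet below only sketches an equivalent programmatic override on a Hadoop Configuration, with an example value rather than a recommended one. Each cost function also has its own multiplier property, not shown here because the exact key names are not confirmed by this log.

// Sketch: programmatic equivalent of lowering the minCostNeedBalance threshold named in the
// log message above. Normally this is configured in hbase-site.xml; the 0.05 value is an example.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold from the 1.0 seen in this run so smaller weighted imbalances trigger a plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        return conf;
    }

    public static void main(String[] args) {
        System.out.println(tunedConf().getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}
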
2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,750 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,750 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,750 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,751 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
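
Editor's note: the BalancerClusterState(202)/(303)/(314) lines preceding each decision describe how the ten servers are indexed: each srvNNN name is assigned a distinct host index, and with a single rack every server lands on rack 0 (hence "Number of tables=1, number of hosts=10, number of racks=1"). The following is an illustrative sketch of that indexing under those assumptions, not the actual BalancerClusterState code.

// Illustrative only: mirrors the "server N is on host N" / "server N is on rack 0" lines by
// assigning each server name its own host index and placing every host on a single rack.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of("srv1164093339", "srv1357203639", "srv138316344");
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String s : servers) {
            hostIndex.put(s, hostIndex.size()); // one host per server, as in this test cluster
        }
        int rackCount = 1; // single rack, as in "racks are {rack=0}"
        hostIndex.forEach((srv, host) ->
            System.out.println("server " + host + " is on host " + host + ", rack " + (host % rackCount)));
    }
}
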
2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,752 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): 
server 5 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited 
test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
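
The StochasticLoadBalancer(421) message repeated throughout this run explains why every per-table plan is skipped: the weighted average imbalance (0.0) never exceeds the threshold (1.0), and the suggested remedies are to lower hbase.master.balancer.stochastic.minCostNeedBalance or raise the multiplier of a specific cost function. The short Java sketch below shows how those two knobs could be set on an HBase Configuration. This is illustrative only: the property key hbase.master.balancer.stochastic.minCostNeedBalance appears verbatim in the log, while hbase.master.balancer.stochastic.regionCountCost is assumed to be the key behind RegionCountSkewCostFunction's multiplier (500.0 in the functionCost lines), and the values chosen here are not taken from this test.

// BalancerTuningSketch.java -- hedged example, not part of the test above
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the default HBase configuration on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Lower the "needs balancing" threshold so an imbalance smaller than
        // the 1.0 used in this test run would still trigger a balance plan.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, weight region-count skew more heavily than the 500.0
        // multiplier reported in the functionCost listing above.
        // (Assumed property key for RegionCountSkewCostFunction's multiplier.)
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Print the effective threshold to confirm the override took effect.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

Either change would make the master's balancer more aggressive for runs like the one logged here; the log continues below.
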
2024-11-07T18:27:15,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,754 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,754 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,754 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,755 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,755 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,756 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,756 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,756 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,757 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,757 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,758 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,758 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,758 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 
5 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,759 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,759 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 
2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1757459025=4, srv1357203639=1, srv668473231=7, srv1164093339=0, srv977891931=9, srv1478000985=3, srv1935322292=5, srv777122632=8, srv138316344=2, srv2146165754=6} racks are {rack=0} 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=10, number of racks=1 2024-11-07T18:27:15,760 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,760 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,761 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,761 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,762 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,762 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,763 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,763 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
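Note: the BalancerClusterState(202)/(303)/(314)/(319) lines describe how the cluster state is indexed before each plan: every server name is mapped to a host index, and every host sits on rack 0 because the test cluster has a single rack. The sketch below is only an illustration of that indexing, not HBase source; the server names are copied from the log.

```java
// Sketch only: build server -> host and server -> rack index arrays the way the
// log output describes ("server N is on host N", "server N is on rack 0").
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class ClusterStateIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of(
            "srv13970210", "srv1508173509", "srv1865106650", "srv1996571961",
            "srv288644869", "srv383501251", "srv717940944", "srv874964839");

        // Mirrors "Hosts are {srvX=N, ...}": one host per server in this test.
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        for (String s : servers) {
            hostIndex.put(s, hostIndex.size());
        }

        int[] serverToHost = new int[servers.size()];
        int[] serverToRack = new int[servers.size()];
        for (int server = 0; server < servers.size(); server++) {
            serverToHost[server] = hostIndex.get(servers.get(server)); // "server N is on host N"
            serverToRack[server] = 0;                                  // "server N is on rack 0"
        }

        System.out.printf("Number of tables=1, number of hosts=%d, number of racks=1%n",
            hostIndex.size());
    }
}
```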
2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,764 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,764 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,765 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,765 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv717940944=6, srv383501251=5, srv874964839=7, srv288644869=4, srv1508173509=1, srv1865106650=2, srv13970210=0, srv1996571961=3} racks are {rack=0} 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,766 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,766 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,767 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,767 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,768 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,768 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,769 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,769 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,770 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,770 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,771 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,771 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,772 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,772 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv817590335=6, srv1871774925=2, srv1701547639=1, srv1533740421=0, srv846054720=7, srv1972596374=4, srv673828959=5, srv1904267397=3} racks are {rack=0} 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,773 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,773 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,774 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,774 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,775 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,775 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,776 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,776 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,777 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,777 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,778 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,778 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,779 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,779 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,780 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,780 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,781 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,781 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,782 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,782 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,783 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,783 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,784 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,784 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,785 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:15,785 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,786 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,786 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,787 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,787 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,788 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,788 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,789 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,789 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,790 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,790 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,791 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:15,791 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,792 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,792 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,793 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,794 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,794 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,795 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,795 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,796 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:15,796 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,797 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:15,797 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,798 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,798 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,799 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,799 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,800 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,800 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,801 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,801 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,802 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:15,802 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,803 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,803 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,804 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,804 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,805 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:15,805 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,806 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,806 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,807 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,807 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,808 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,808 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,809 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,809 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,810 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,810 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,811 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,811 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
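Every "Generate Balance plan" cycle in this section rebuilds the same cluster shape: eight servers, each on its own host, all on a single rack, which is why the state summary is always tables=1, hosts=8, racks=1. The sketch below shows how those counts follow from the logged maps; the server-to-host entries mirror the "Hosts are {...}" line, while the class itself is hypothetical.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only: derives the "number of hosts=8, number of racks=1" summary
// from assignments like those logged by BalancerClusterState above.
public class ClusterShapeSketch {
    public static void main(String[] args) {
        // Server name -> host index, copied from the logged "Hosts are {...}" map.
        Map<String, Integer> serverToHost = new HashMap<>();
        serverToHost.put("srv1070359091", 0);
        serverToHost.put("srv1127852274", 1);
        serverToHost.put("srv1678292776", 2);
        serverToHost.put("srv2048817373", 3);
        serverToHost.put("srv63059952", 4);
        serverToHost.put("srv669892746", 5);
        serverToHost.put("srv689703365", 6);
        serverToHost.put("srv886838906", 7);

        // Every server sits on the single rack index 0 ("racks are {rack=0}").
        Set<Integer> racks = new HashSet<>();
        for (String server : serverToHost.keySet()) {
            racks.add(0);
        }

        Set<Integer> hosts = new HashSet<>(serverToHost.values());
        System.out.printf("Number of tables=%d, number of hosts=%d, number of racks=%d%n",
            1, hosts.size(), racks.size());
    }
}
```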
2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,812 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,812 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,813 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,813 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,814 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,814 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,815 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,815 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,816 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,816 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,817 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,817 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
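The BalancerClusterState records repeated for each table describe the same topology: eight hosts mapped to indices 0-7, one server per host, and a single rack (index 0). A hedged sketch of that indexing is shown below; the arrays and names are illustrative only, not HBase's actual data structures.

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: reproduces the server -> host -> rack indexing that
// the BalancerClusterState lines above report for this single-rack test cluster.
public class ClusterIndexSketch {
    public static void main(String[] args) {
        // host name -> host index, as printed in the "Hosts are {...}" record
        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        String[] hosts = {"srv1070359091", "srv1127852274", "srv1678292776", "srv2048817373",
                          "srv63059952", "srv669892746", "srv689703365", "srv886838906"};
        for (int i = 0; i < hosts.length; i++) {
            hostIndex.put(hosts[i], i);
        }

        int[] serverToHost = new int[hosts.length];
        int[] serverToRack = new int[hosts.length];
        for (int server = 0; server < hosts.length; server++) {
            serverToHost[server] = server; // one server per host in this test
            serverToRack[server] = 0;      // single rack ("racks are {rack=0}")
            System.out.println("server " + server + " is on host " + serverToHost[server]
                + " and rack " + serverToRack[server]);
        }
        System.out.println("Number of tables=1, number of hosts=" + hosts.length
            + ", number of racks=1");
    }
}
```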
2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,818 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,818 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,820 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,820 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,821 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,821 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,822 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,822 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,823 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,823 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,824 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,824 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,825 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv63059952=4, srv669892746=5, srv1678292776=2, srv689703365=6, srv886838906=7, srv1127852274=1, srv1070359091=0, srv2048817373=3} racks are {rack=0} 2024-11-07T18:27:15,825 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,826 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,826 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,828 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,828 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table120 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table120) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,829 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,829 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table121 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table121) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,830 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,830 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table122 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table122) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,831 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,831 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table123 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table123) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,832 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,832 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table124 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table124) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,833 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,833 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table125 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table125) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table126 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table126) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,834 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table127 2024-11-07T18:27:15,834 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table127) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table128 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table128) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table129 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,835 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,835 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table129) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table90 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
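(Annotation) Each skip message lists every cost function with its multiplier and current imbalance, and functions marked "(not needed)" carry no contribution. The "weighted average imbalance" figure is consistent with a multiplier-weighted mean over the remaining functions; the sketch below reproduces that arithmetic for the values printed above. This is an illustration of the reported numbers under that assumption, not the actual StochasticLoadBalancer implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceExample {
  public static void main(String[] args) {
    // multiplier -> imbalance pairs as printed in the log entries above;
    // functions reported as "(not needed)" are omitted.
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
    functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
    functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
    functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
    functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
    functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
    functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
    functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] mi : functions.values()) {
      weightedSum += mi[0] * mi[1]; // multiplier * imbalance
      totalWeight += mi[0];
    }
    double weightedAverage = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;

    // With every imbalance at 0.0 this prints 0.0, matching the log, and
    // 0.0 <= threshold(1.0), so the balancer skips the table.
    System.out.println("weighted average imbalance = " + weightedAverage);
  }
}
```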
2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table90) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table91 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table91) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,836 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table92 2024-11-07T18:27:15,836 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table92) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table93 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table93) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,837 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table94 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,837 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table94) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table95 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table95) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,838 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table96 2024-11-07T18:27:15,838 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table96) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table97 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table97) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,839 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,839 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table98 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table98) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,840 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,840 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table99 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table99) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,841 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,841 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table110 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table110) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table111 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table111) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table112 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table112) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table113 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table113) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table114 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,843 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,843 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table114) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table115 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table115) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table116 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table116) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,844 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table117 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,844 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table117) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table118 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table118) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table119 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table119) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,845 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table80 2024-11-07T18:27:15,845 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table80) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table81 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table81) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,846 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table82 2024-11-07T18:27:15,846 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table82) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table83 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table83) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,847 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table84 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,847 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table84) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table85 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table85) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table86 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,848 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,848 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table86) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table87 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table87) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table88 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,849 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,849 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table88) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table89 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table89) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table79 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,850 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,850 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
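[Editor's note] A short sketch of the arithmetic implied by the messages above, assuming the reported "weighted average imbalance" is the multiplier-weighted mean of the per-function imbalances in the functionCost dump (functions marked "not needed" excluded). With every imbalance at 0.0, the result is 0.0, which stays at or below the 1.0 threshold, so the balancer skips each table.

```java
public class WeightedImbalanceSketch {
  public static void main(String[] args) {
    // {multiplier, imbalance} pairs copied from the functionCost dump above.
    double[][] costs = {
      {500.0, 0.0}, // RegionCountSkewCostFunction
      {7.0,   0.0}, // MoveCostFunction
      {15.0,  0.0}, // RackLocalityCostFunction
      {35.0,  0.0}, // TableSkewCostFunction
      {5.0,   0.0}, // ReadRequestCostFunction
      {5.0,   0.0}, // WriteRequestCostFunction
      {5.0,   0.0}, // MemStoreSizeCostFunction
      {5.0,   0.0}, // StoreFileCostFunction
    };
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (double[] c : costs) {
      weightedSum += c[0] * c[1];
      weightTotal += c[0];
    }
    double weightedAverageImbalance = weightedSum / weightTotal; // 0.0 for these inputs
    // Mirrors the logged decision: imbalance <= threshold(1.0) means "skipping load balancing".
    System.out.println(weightedAverageImbalance + " <= 1.0 ? " + (weightedAverageImbalance <= 1.0));
  }
}
```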
2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table79) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,851 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,851 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,852 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,852 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,853 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table70 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,853 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table70) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table71 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table71) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table72 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,854 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,854 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table72) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table73 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table73) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,855 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table74 2024-11-07T18:27:15,855 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table74) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
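The repeated BalancerClusterState lines record the same bookkeeping for every table: each of the eight servers is assigned a host index (the map {srv811910423=7, srv1694212815=4, ...}), and because there is only one rack ({rack=0}) every server ends up on rack 0, giving "number of hosts=8, number of racks=1". The fragment below is a small, hypothetical illustration of that indexing, assuming one server per host and a single shared rack; it does not reproduce the actual BalancerClusterState logic or its index ordering.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the host/rack indexing summarized in the
// BalancerClusterState log lines above (one server per host, one rack).
public class ClusterIndexSketch {
    public static void main(String[] args) {
        List<String> servers = List.of(
            "srv1054700865", "srv1135992768", "srv1445327422", "srv1627084004",
            "srv1694212815", "srv298828147", "srv790241299", "srv811910423");

        Map<String, Integer> hostIndex = new LinkedHashMap<>();
        Map<String, Integer> rackIndex = new LinkedHashMap<>();
        for (String server : servers) {
            // Each distinct hostname gets the next free host index.
            if (!hostIndex.containsKey(server)) {
                hostIndex.put(server, hostIndex.size());
            }
            // All hosts sit in the single rack "rack", index 0.
            rackIndex.putIfAbsent("rack", 0);
        }

        hostIndex.forEach((server, idx) ->
            System.out.println("server " + idx + " is on host " + idx + " and rack 0"));
        System.out.println("hosts=" + hostIndex.size() + ", racks=" + rackIndex.size());
    }
}
```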
2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,856 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table75 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,856 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table75) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table76 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,857 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,857 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table76) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table77 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,858 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,858 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table77) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table78 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table78) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,859 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table68 2024-11-07T18:27:15,859 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table68) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table130 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table130) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table69 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,860 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,860 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table69) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table131 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table131) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table132 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,861 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,861 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table132) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table133 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table133) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table134 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table134) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,862 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table135 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,862 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
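The BalancerClusterState lines map each server to a host index and a rack index before the cost functions run; with only the single rack "rack", every server lands on rack 0, giving number of hosts=8 and number of racks=1. A rough, hypothetical sketch of building such index arrays from the host map printed in the log (not the actual HBase implementation):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Host map copied from the "Hosts are {...}" line: server name -> host index.
    Map<String, Integer> hostIndex = new LinkedHashMap<>();
    hostIndex.put("srv1054700865", 0);
    hostIndex.put("srv1135992768", 1);
    hostIndex.put("srv1445327422", 2);
    hostIndex.put("srv1627084004", 3);
    hostIndex.put("srv1694212815", 4);
    hostIndex.put("srv298828147", 5);
    hostIndex.put("srv790241299", 6);
    hostIndex.put("srv811910423", 7);

    int numServers = hostIndex.size();
    int[] serverToHost = new int[numServers];
    int[] serverToRack = new int[numServers];

    int server = 0;
    for (Map.Entry<String, Integer> e : hostIndex.entrySet()) {
      serverToHost[server] = e.getValue(); // matches "server i is on host i"
      serverToRack[server] = 0;            // single rack "rack" -> rack index 0
      server++;
    }
    System.out.println("hosts=" + numServers + ", racks=1");
  }
}
```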
2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table135) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table136 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table136) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table137 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table137) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,863 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table138 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,863 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table138) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table139 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table139) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table60 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table60) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,864 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table61 2024-11-07T18:27:15,864 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table61) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table62 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table62) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,865 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table63 2024-11-07T18:27:15,865 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table63) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table64 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,866 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,866 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table64) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table65 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table65) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table66 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,867 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,867 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table66) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table67 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table67) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table57 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table57) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,868 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table58 2024-11-07T18:27:15,868 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table58) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table59 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table59) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,869 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,870 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,870 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,871 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,871 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,872 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table56 2024-11-07T18:27:15,872 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table56) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,873 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:15,873 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,874 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:15,874 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,875 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,875 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,876 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,876 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,877 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,877 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,878 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,878 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,879 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,879 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table100 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table100) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,880 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:15,880 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table101 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table101) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table102 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,881 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,881 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table102) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table103 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table103) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table104 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table104) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table105 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,882 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,882 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table105) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table106 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table106) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table107 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table107) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,883 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table108 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,883 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table108) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table109 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table109) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,884 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:15,884 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,885 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:15,885 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,886 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:15,886 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,887 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,887 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,889 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,889 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,896 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,896 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,897 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:15,897 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv811910423=7, srv1694212815=4, srv298828147=5, srv1445327422=2, srv1054700865=0, srv790241299=6, srv1627084004=3, srv1135992768=1} racks are {rack=0} 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=8, number of racks=1 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv691482499=4, srv591394861=3, srv2061498643=1, srv2001582661=0, srv213595036=2} racks are {rack=0} 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:15,898 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,898 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv691482499=4, srv591394861=3, srv2061498643=1, srv2001582661=0, srv213595036=2} racks are {rack=0} 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv691482499=4, srv591394861=3, srv2061498643=1, srv2001582661=0, srv213595036=2} racks are {rack=0} 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv691482499=4, srv591394861=3, srv2061498643=1, srv2001582661=0, srv213595036=2} racks are {rack=0} 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv691482499=4, srv591394861=3, srv2061498643=1, srv2001582661=0, srv213595036=2} racks are {rack=0} 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,899 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,899 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=5, number of racks=1 2024-11-07T18:27:15,900 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,900 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,919 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table13 2024-11-07T18:27:15,919 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,920 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,920 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,921 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,922 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,923 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,923 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,924 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,925 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,925 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,926 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,927 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,927 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,928 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,929 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,930 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,930 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,931 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,931 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table13) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
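[Editor's note, not part of the log output: the balancer message above suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost-function multiplier to get more aggressive balancing. The sketch below shows, under stated assumptions, how those settings could be adjusted programmatically on an HBase Configuration; the minCostNeedBalance property name comes from the log line itself, while the region-count multiplier key and the concrete values are assumptions based on HBase defaults, not taken from this test run.]

```java
// Minimal, hypothetical tuning sketch for the StochasticLoadBalancer settings
// referenced in the log message above. Values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Lower the imbalance threshold so balancing runs even for small skews.
    // The log above shows this run was skipped because imbalance=0.0 <= threshold(1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Alternatively, raise the weight of a specific cost function, e.g. region count skew
    // (multiplier=500.0 in the functionCost listing). This property key is assumed from
    // HBase defaults; verify it against the HBase version in use.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

    // Echo the effective threshold (0.025f is only a fallback default here).
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
  }
}
```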
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,931 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table14 2024-11-07T18:27:15,931 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,932 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,932 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,933 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,934 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,935 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,935 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,936 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,937 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,937 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,938 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,938 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,939 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,940 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
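The INFO lines in this stretch are BalancerClusterState assigning dense integer indices: every server name gets an index, and because this test places each server on its own host inside a single rack, server i ends up on host i and rack 0. The stand-alone Java sketch below (not the actual HBase code, and using a few server names lifted from the Hosts map later in the log purely as sample strings) illustrates that kind of index assignment.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ClusterIndexSketch {
  public static void main(String[] args) {
    // Hypothetical sample of server names in the style of the log's "Hosts are {...}" map.
    String[] servers = {"srv1760873703", "srv487641141", "srv979813971"};

    Map<String, Integer> serverIndex = new HashMap<>();
    List<Integer> serverToHost = new ArrayList<>();
    List<Integer> serverToRack = new ArrayList<>();

    for (String name : servers) {
      int idx = serverIndex.size();
      serverIndex.put(name, idx);
      serverToHost.add(idx); // one server per host in this test, so host index == server index
      serverToRack.add(0);   // a single rack, so every server maps to rack 0
    }

    for (String name : servers) {
      int s = serverIndex.get(name);
      System.out.println("server " + s + " is on host " + serverToHost.get(s)
          + " and rack " + serverToRack.get(s));
    }
  }
}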
2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,941 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,942 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:15,942 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:15,942 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table14) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,942 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table15 2024-11-07T18:27:15,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,943 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,944 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,944 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
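The StochasticLoadBalancer advisory logged above for table14 points at two tuning knobs: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 here) and the per-cost-function multipliers reported in the functionCost= listing. As a rough sketch of what that decision amounts to, the snippet below recomputes a multiplier-weighted average of the logged imbalance values and compares it against a lowered threshold. The exact weighting HBase uses internally may differ, and the 0.05 value is only an illustrative assumption; in a real deployment the property would typically be set in hbase-site.xml on the master rather than in code like this.

import org.apache.hadoop.conf.Configuration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Multipliers and imbalances copied from the functionCost= line above
    // (cost functions reported as "not needed" are omitted).
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

    double weighted = 0.0, totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    double weightedAverageImbalance = weighted / totalWeight; // 0.0 for this run

    // Hypothetical lowered threshold; the default reported in the log above is 1.0.
    Configuration conf = new Configuration();
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    float threshold = conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f);

    System.out.println("weighted average imbalance = " + weightedAverageImbalance
        + ", threshold = " + threshold
        + ", would balance = " + (weightedAverageImbalance > threshold));
  }
}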
2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,945 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,946 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,947 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,947 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,948 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,948 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,949 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,950 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,950 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,951 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,952 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,953 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,953 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,954 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,954 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table15) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
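[Editor's note] The INFO record above names the two knobs the balancer itself suggests when it skips a plan: lowering hbase.master.balancer.stochastic.minCostNeedBalance (reported here as threshold 1.0) or raising the multiplier of a specific cost function. A minimal, hypothetical Java sketch of applying those settings is below; the property key for minCostNeedBalance is taken verbatim from the log message, the regionCountCost key and the numeric values are illustrative assumptions, not part of this test run.

```java
// Hypothetical tuning sketch (not part of the test above): make the
// StochasticLoadBalancer act on smaller imbalances than the 1.0 threshold
// reported in this run. Requires hbase-common on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key quoted verbatim in the log message above; 0.025 is only an
        // illustrative value below the threshold this run reported (1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);

        // Alternatively, weight one cost function more heavily. Key name and
        // value are assumptions, chosen to mirror the
        // "RegionCountSkewCostFunction : (multiplier=500.0, ...)" entry above.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        // Print back the effective setting as a quick sanity check.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}
```

In a real deployment these properties would normally be set in hbase-site.xml on the master rather than programmatically; the sketch only illustrates which keys the log message is referring to. The functionCost breakdown for this skipped plan follows in the next record.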
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,954 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table16 2024-11-07T18:27:15,954 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,955 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,955 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,956 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,957 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,958 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,958 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,959 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,960 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,960 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,961 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,962 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,962 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,963 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,964 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,965 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,965 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,966 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,966 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table16) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,966 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table17 2024-11-07T18:27:15,966 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,967 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,967 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,968 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,969 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,970 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,972 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,972 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,973 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,973 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,974 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,975 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,976 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,976 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,977 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,977 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table17) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
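The skip message above names two tuning knobs: hbase.master.balancer.stochastic.minCostNeedBalance and the relative multipliers of the individual cost functions. Below is a minimal sketch of adjusting them programmatically through an HBase Configuration; the 0.05 and 1000 values, the class name, and the regionCountCost key are illustrative assumptions (in a real deployment these properties would normally be set in hbase-site.xml).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold below which the balancer considers the cluster
        // "balanced enough"; the run above skipped balancing because the
        // weighted average imbalance (0.0) was <= the threshold (1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, raise a specific cost function's relative weight. This key
        // is believed to back RegionCountSkewCostFunction (multiplier=500.0 above).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = "
                + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}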
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,977 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table18 2024-11-07T18:27:15,977 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,978 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,978 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,979 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,980 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,981 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,981 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,982 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,982 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,983 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,984 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,984 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,985 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,986 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,987 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,988 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table18) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,988 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table19 2024-11-07T18:27:15,988 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:15,989 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:15,989 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:15,990 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:15,991 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:15,992 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:15,992 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:15,993 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:15,994 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:15,994 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:15,995 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:15,995 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:15,996 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:15,997 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:15,998 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:15,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:15,999 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table19) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
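[editor's note] The log message above reports that balancing is skipped because the "weighted average imbalance" (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost line that follows lists each cost function's multiplier and imbalance. The log does not show exactly how the balancer combines those numbers; the snippet below is a minimal, hypothetical Java sketch assuming a multiplier-weighted average, using the multipliers and imbalances copied from the functionCost line. The class and method names are illustrative only and are not HBase source code.

// Hypothetical sketch, not HBase source: combine per-cost-function imbalances
// into a multiplier-weighted average and compare it against the threshold
// named in the log (hbase.master.balancer.stochastic.minCostNeedBalance).
import java.util.LinkedHashMap;
import java.util.Map;

public class BalanceDecisionSketch {

    // Weighted average of per-function imbalances; weights are the multipliers.
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            double multiplier = multiplierAndImbalance[0];
            double imbalance  = multiplierAndImbalance[1];
            weightedSum += multiplier * imbalance;
            weightTotal += multiplier;
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs taken from the functionCost line in this log;
        // functions reported as "(not needed)" are omitted.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",             new double[] {  7.0, 0.0});
        costs.put("RackLocalityCostFunction",     new double[] { 15.0, 0.0});
        costs.put("TableSkewCostFunction",        new double[] { 35.0, 0.0});
        costs.put("ReadRequestCostFunction",      new double[] {  5.0, 0.0});
        costs.put("WriteRequestCostFunction",     new double[] {  5.0, 0.0});
        costs.put("MemStoreSizeCostFunction",     new double[] {  5.0, 0.0});
        costs.put("StoreFileCostFunction",        new double[] {  5.0, 0.0});

        // Default threshold quoted in the log message; lowering it (per the log's
        // own suggestion) makes the balancer act on smaller imbalances.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(costs);
        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which matches the "skipping load balancing" decision recorded above; raising a specific multiplier or lowering the threshold, as the log message suggests, is what would tip this comparison toward producing a balance plan.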
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:15,999 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table50 2024-11-07T18:27:15,999 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,000 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,000 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,001 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,002 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,003 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,003 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,004 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,005 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,005 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,006 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,007 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,007 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,009 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,010 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table50) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,010 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table51 2024-11-07T18:27:16,010 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,011 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,012 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,012 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,013 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,014 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,014 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,015 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,016 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,016 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,017 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,018 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,018 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,019 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,020 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,021 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,021 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,021 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,021 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table51) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,022 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table52 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
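The table51 decision above is governed by hbase.master.balancer.stochastic.minCostNeedBalance (threshold 1.0 in this run) and by the per-cost-function multipliers listed under functionCost. Below is a minimal sketch of how those knobs could be tuned, assuming a standard HBaseConfiguration is available on the classpath; the minCostNeedBalance key is taken verbatim from the log message, while hbase.master.balancer.stochastic.regionCountCost is only assumed to be the multiplier key for RegionCountSkewCostFunction.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Lower the threshold so the balancer generates plans for smaller
        // weighted-average imbalances than the default of 1.0 seen above.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Assumed key for the RegionCountSkewCostFunction multiplier
        // (shown as multiplier=500.0 in the functionCost dump); raising it
        // weights region-count skew more heavily than the other cost functions.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}

In a real deployment these values would normally be set in hbase-site.xml on the master rather than programmatically.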
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,022 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,023 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,023 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,024 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,025 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,026 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,026 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,027 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,027 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,027 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,028 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,029 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,029 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,030 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,031 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,032 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,033 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table52) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
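[editor's note] The INFO message above explains why the balancing round for table52 was skipped: the weighted average imbalance (0.0) did not exceed the minCostNeedBalance threshold (1.0), and it names two knobs for more aggressive balancing, the hbase.master.balancer.stochastic.minCostNeedBalance property and the per-cost-function multipliers listed in the functionCost line that follows. Below is a minimal, illustrative Java sketch of lowering that threshold programmatically; only the property key is taken from the log message itself, while the class name, the 0.025 value, and the use of HBaseConfiguration/Configuration here are assumptions for illustration, not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MinCostNeedBalanceSketch {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // Lower the "needs balance" threshold named in the log message above, so that
        // imbalances smaller than the 1.0 used in this test would still trigger a plan.
        // 0.025f is an illustrative value, not taken from this run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

In a real deployment the same key would typically be set in hbase-site.xml on the master rather than in code. [end editor's note]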
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,033 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table53 2024-11-07T18:27:16,033 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,034 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,034 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,035 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,036 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,037 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,037 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,038 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,039 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,039 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,040 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,041 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,041 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,043 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0
[... identical BalancerClusterState(314) INFO entries at 18:27:16,043-044 for servers 319 through 392, each reporting rack 0 ...]
2024-11-07T18:27:16,044 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,044 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,044 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table53) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
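As context for the skip decision above: the functionCost breakdown that follows lists each cost function with its multiplier and current imbalance. The sketch below is a hypothetical illustration (not HBase source code) of the comparison being logged, assuming the weighted average imbalance is the multiplier-weighted mean of the per-function imbalances; with every imbalance at 0.0 it stays below the default hbase.master.balancer.stochastic.minCostNeedBalance of 1.0, so no balance plan is generated for table53.

    // Hypothetical sketch only -- illustrates the logged decision, not the actual HBase implementation.
    public final class WeightedImbalanceSketch {
        /** Assumed weighting: sum(multiplier_i * imbalance_i) / sum(multiplier_i). */
        static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
            double weighted = 0.0, total = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                weighted += multipliers[i] * imbalances[i];
                total += multipliers[i];
            }
            return total == 0.0 ? 0.0 : weighted / total;
        }

        public static void main(String[] args) {
            // Multipliers and imbalances as reported for table53 ("not needed" functions omitted).
            double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
            double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
            double avg = weightedAverageImbalance(multipliers, imbalances);
            double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
            System.out.printf("weighted average imbalance=%.1f <= threshold(%.1f): skip balancing=%b%n",
                avg, minCostNeedBalance, avg <= minCostNeedBalance);
        }
    }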
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,044 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table10 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,045 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,045 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23
[... identical BalancerClusterState(303) DEBUG entries at 18:27:16,045-050 for servers 24 through 392, each reporting "server N is on host N" ...]
2024-11-07T18:27:16,050 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[... identical BalancerClusterState(314) INFO entries at 18:27:16,050-051 for servers 1 through 96, each reporting rack 0 ...]
2024-11-07T18:27:16,051 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,051 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,052 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,052 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,053 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,054 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,055 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,055 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,055 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,055 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table10) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
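The skip decision above compares a weighted average of the per-cost-function imbalances (listed in the functionCost breakdown that follows) against the hbase.master.balancer.stochastic.minCostNeedBalance threshold, reported here as 1.0. Below is a minimal, self-contained Java sketch of that comparison; the averaging formula and the example lower threshold of 0.05 are assumptions for illustration, not the exact StochasticLoadBalancer implementation. Only the property name and the multiplier/imbalance values are taken from the log itself.

// Minimal sketch (not HBase source): assumes the "weighted average imbalance" in the
// log line above is sum(multiplier_i * imbalance_i) / sum(multiplier_i), compared
// against hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run).
public class MinCostNeedBalanceSketch {
    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weighted = 0.0;
        double total = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weighted += multipliers[i] * imbalances[i];
            total += multipliers[i];
        }
        return total == 0.0 ? 0.0 : weighted / total;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost breakdown below
        // (cost functions reported as "not needed" are omitted).
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        // Threshold as reported in this run; lowering it (e.g. to 0.05 via
        // hbase.master.balancer.stochastic.minCostNeedBalance in hbase-site.xml)
        // would make the balancer act on smaller imbalances, as the log message suggests.
        double minCostNeedBalance = 1.0;

        double imbalance = weightedAverageImbalance(multipliers, imbalances);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + imbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("would generate a balance plan: imbalance=" + imbalance);
        }
    }
}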
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,056 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table54 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,056 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,057 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,057 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,058 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,059 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,059 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,060 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,061 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,061 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,062 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,063 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,063 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,064 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,065 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,066 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,066 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,067 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table54) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,067 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table11 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,067 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,068 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,068 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,069 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,070 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,070 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,071 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,072 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,074 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,074 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,075 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,076 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,077 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,077 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table11) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
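[editor's note] The INFO message above reports why the balancer skipped table11: the weighted average imbalance (0.0) did not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0). The sketch below is NOT the HBase StochasticLoadBalancer implementation; it is a minimal, hypothetical illustration of that kind of threshold check, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) over the enabled cost terms. All class and method names are invented; the multipliers, imbalances, and the 1.0 threshold are copied from the functionCost line that follows.

import java.util.List;

public class BalanceThresholdSketch {

    // Hypothetical cost term: a weight (multiplier) and a normalized imbalance
    // in [0, 1], mirroring the "multiplier=..., imbalance=..." pairs in the log.
    record CostTerm(String name, double multiplier, double imbalance) {}

    // Weighted-average imbalance across all enabled cost terms (assumed formula).
    static double weightedImbalance(List<CostTerm> terms) {
        double weightedSum = 0.0, weightTotal = 0.0;
        for (CostTerm t : terms) {
            weightedSum += t.multiplier() * t.imbalance();
            weightTotal += t.multiplier();
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // Values taken from the log: every imbalance is 0.0, so the weighted
        // average is 0.0 and the balancer would skip generating a plan.
        List<CostTerm> terms = List.of(
            new CostTerm("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostTerm("MoveCostFunction", 7.0, 0.0),
            new CostTerm("RackLocalityCostFunction", 15.0, 0.0),
            new CostTerm("TableSkewCostFunction", 35.0, 0.0),
            new CostTerm("ReadRequestCostFunction", 5.0, 0.0),
            new CostTerm("WriteRequestCostFunction", 5.0, 0.0),
            new CostTerm("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostTerm("StoreFileCostFunction", 5.0, 0.0));

        double minCostNeedBalance = 1.0; // threshold reported in the log message
        double imbalance = weightedImbalance(terms);
        if (imbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing: imbalance=" + imbalance
                + " <= threshold(" + minCostNeedBalance + ")");
        } else {
            System.out.println("would generate a balance plan, imbalance=" + imbalance);
        }
    }
}

Under this reading, raising a cost function's multiplier (or lowering minCostNeedBalance, as the log message itself suggests) is what makes the check trip and a balance plan get generated; with every imbalance at 0.0, no setting of the weights changes the outcome here. [end editor's note]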
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,077 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table55 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,078 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,078 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): servers 23 through 392 are each on their own host (host index matches server index)
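For reference, the BalancerClusterState lines above amount to two index tables: each server maps to a host and to a rack, and in this test topology every server is its own host while all 393 servers share rack 0. A minimal illustrative sketch of that indexing follows; the class and field names are hypothetical, not the actual BalancerClusterState members.

```java
public class ClusterTopologySketch {
    final int[] serverToHost; // serverToHost[s] = host index of server s
    final int[] serverToRack; // serverToRack[s] = rack index of server s

    ClusterTopologySketch(int numServers) {
        serverToHost = new int[numServers];
        serverToRack = new int[numServers];
        for (int s = 0; s < numServers; s++) {
            serverToHost[s] = s; // in this test, every server is its own host
            serverToRack[s] = 0; // and all servers share a single rack
        }
    }

    public static void main(String[] args) {
        ClusterTopologySketch topo = new ClusterTopologySketch(393); // 393 servers, as logged
        System.out.println("server 392 -> host " + topo.serverToHost[392]
            + ", rack " + topo.serverToRack[392]);
    }
}
```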
2024-11-07T18:27:16,083 INFO [Time-limited test {}] balancer.BalancerClusterState(314): servers 0 through 128 are on rack 0 2024-11-07T18:27:16,085
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,085 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,086 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,087 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,088 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table55) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,088 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table12 2024-11-07T18:27:16,088 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
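The StochasticLoadBalancer entry above declines to generate a plan for table55 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this run), and the functionCost line lists each cost function's multiplier and imbalance. The following is a minimal, self-contained Java sketch of that check, not the balancer's actual code: it assumes the weighted average is the multiplier-weighted mean of the per-function imbalances (a plausible reading of the log message) and hard-codes the values reported above. In a real deployment the threshold would be lowered by setting hbase.master.balancer.stochastic.minCostNeedBalance in hbase-site.xml rather than in code like this.

// Standalone sketch (not part of the test log): reproduces the "weighted average
// imbalance" arithmetic suggested by the StochasticLoadBalancer message above,
// using the multiplier/imbalance pairs from the functionCost line. This is an
// illustration under the stated assumption, not HBase's exact implementation.
public class MinCostNeedBalanceSketch {
    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost line for table55;
        // cost functions reported as "(not needed)" are omitted.
        double[][] costs = {
            {500.0, 0.0}, // RegionCountSkewCostFunction
            {7.0,   0.0}, // MoveCostFunction
            {15.0,  0.0}, // RackLocalityCostFunction
            {35.0,  0.0}, // TableSkewCostFunction
            {5.0,   0.0}, // ReadRequestCostFunction
            {5.0,   0.0}, // WriteRequestCostFunction
            {5.0,   0.0}, // MemStoreSizeCostFunction
            {5.0,   0.0}  // StoreFileCostFunction
        };
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] c : costs) {
            weightedSum += c[0] * c[1];   // multiplier * imbalance
            multiplierSum += c[0];
        }
        double weightedAverageImbalance =
            multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
        // Threshold mirrors hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here).
        double minCostNeedBalance = 1.0;
        System.out.printf("weighted average imbalance=%.3f threshold=%.1f -> balancing %s%n",
            weightedAverageImbalance, minCostNeedBalance,
            weightedAverageImbalance > minCostNeedBalance ? "needed" : "skipped");
    }
}

With every imbalance at 0.0 the weighted average is 0.0, which is why the log reports the table as skipped; lowering the threshold or raising a specific cost function's multiplier (as the message itself suggests) is what would make the balancer act on smaller imbalances.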
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,089 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,089 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,090 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,091 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,092 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,092 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,093 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,094 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,094 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,095 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,095 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,096 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,097 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,098 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,099 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table12) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
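The INFO entry above reports that balancing for table12 was skipped because the weighted average imbalance (0.0) was below the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0), and it suggests either lowering that threshold or raising the multiplier of a specific cost function. As a minimal sketch, not part of this test run, the Java snippet below shows how such properties could be set on an HBase Configuration; the minCostNeedBalance key is taken from the log message itself, the regionCountCost key is assumed to be the standard multiplier property for RegionCountSkewCostFunction, and the values are purely illustrative.

```java
// Hypothetical tuning sketch, not part of the test output above.
// Lowering minCostNeedBalance (default 1.0 per the log) or raising a cost-function
// multiplier makes the StochasticLoadBalancer act on smaller imbalances.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "needs balancing" threshold so small skews still trigger a plan
    // (illustrative value; the log shows the default threshold of 1.0).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed standard key for the RegionCountSkewCostFunction weight
    // (the log shows its multiplier at 500.0); raising it emphasizes region-count skew.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    System.out.println("minCostNeedBalance = "
        + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
  }
}
```

In a real deployment these properties would normally be set in hbase-site.xml on the master rather than programmatically; the snippet only illustrates which knobs the log message is referring to.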
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,099 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table46 2024-11-07T18:27:16,099 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,100 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,100 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,101 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,102 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,103 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,103 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,104 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,105 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,105 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,106 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,106 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,107 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,109 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,109 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,110 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,110 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table46) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,110 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table47 2024-11-07T18:27:16,110 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,111 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,111 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,112 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,113 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,114 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,114 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,115 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,116 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,116 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,117 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,117 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,118 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,119 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,120 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,121 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table47) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,121 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table48 2024-11-07T18:27:16,121 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,122 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,122 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85
2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86
2024-11-07T18:27:16,123 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87
[identical DEBUG lines from balancer.BalancerClusterState(303) continue for servers 88 through 391, each reporting "server N is on host N"]
2024-11-07T18:27:16,127 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-07T18:27:16,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-07T18:27:16,127 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
[identical INFO lines from balancer.BalancerClusterState(314) continue for servers 2 through 348, each reporting "server N is on rack 0"]
2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0
2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,131 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,131 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,132 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,132 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table48) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
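[Editor's aside, not part of the captured log: the INFO message above explains that balancing was skipped because the weighted average imbalance was below the minCostNeedBalance threshold, and that lowering hbase.master.balancer.stochastic.minCostNeedBalance (or raising individual cost-function multipliers) makes the balancer act more aggressively. A minimal sketch of doing that programmatically follows; the property name is the one quoted in the log message itself, while the 0.05 value and the class/main wrapper are purely illustrative assumptions. In a real deployment the same key would normally be set in hbase-site.xml on the HMaster rather than in code.]

// Hedged sketch: lower the StochasticLoadBalancer's "min cost need balance"
// threshold, as the log message above suggests for more aggressive balancing.
// The 0.05 value is illustrative only; the default shown in the log is 1.0.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdExample {
    public static void main(String[] args) {
        // Start from the standard HBase configuration (hbase-site.xml etc.).
        Configuration conf = HBaseConfiguration.create();

        // Property named in the log message; lowering it makes the balancer
        // generate plans for smaller weighted-average imbalances.
        conf.set("hbase.master.balancer.stochastic.minCostNeedBalance", "0.05");

        // Print the effective value to confirm the override took effect.
        System.out.println(
            conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

[End of aside; the log resumes below with the functionCost breakdown for table48 and the plan generation for table49.]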
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,132 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table49 2024-11-07T18:27:16,132 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,133 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,133 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,134 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,135 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,135 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,136 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,137 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,137 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,138 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,139 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,139 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,140 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,141 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,142 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,142 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table49) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
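[editor's note] The balancer message above points at a tunable: it skips balancing because the weighted average imbalance is below threshold(1.0) and suggests lowering hbase.master.balancer.stochastic.minCostNeedBalance. As a minimal sketch (not part of this log run), this is how that property could be lowered programmatically through the standard org.apache.hadoop.conf.Configuration API before constructing a balancer in a test; the 0.025f value is only an illustrative choice, not taken from this output.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
      public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath, if any).
        Configuration conf = HBaseConfiguration.create();

        // The run logged above uses threshold(1.0); a smaller value makes the
        // StochasticLoadBalancer willing to act on smaller imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);

        // Confirm the value that a balancer created with this conf would see.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

The same effect can be had by setting the property in hbase-site.xml; the log's alternative suggestion, raising the multiplier of a specific cost function (e.g. the RegionCountSkewCostFunction multiplier shown below), changes which imbalances dominate the weighted average rather than the threshold itself.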
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,142 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table40 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,143 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,145 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,146 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,146 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,147 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,147 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,147 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,148 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,149 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,149 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,150 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,151 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,152 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table40) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
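[Editor's note] The INFO message above reports that the StochasticLoadBalancer skipped table40 because the weighted average imbalance (0.0) did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run). The following is a minimal, illustrative Java sketch of how such a check could work, under the assumption that each cost function contributes multiplier * imbalance and balancing is triggered only when the weighted average exceeds the threshold. It is not the actual HBase implementation; the class, record, and method names (NeedsBalanceSketch, Cost, needsBalance) are hypothetical, and the multiplier/imbalance values are copied from the functionCost line that follows.

    // Illustrative sketch only; not HBase's StochasticLoadBalancer code.
    public class NeedsBalanceSketch {

        // Hypothetical holder for one cost function's weight and current imbalance.
        record Cost(String name, double multiplier, double imbalance) {}

        // Weighted average of imbalances, compared against minCostNeedBalance.
        static boolean needsBalance(java.util.List<Cost> costs, double minCostNeedBalance) {
            double weightedSum = 0.0;
            double totalWeight = 0.0;
            for (Cost c : costs) {
                weightedSum += c.multiplier() * c.imbalance();
                totalWeight += c.multiplier();
            }
            double weightedAverageImbalance = totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
            return weightedAverageImbalance > minCostNeedBalance;
        }

        public static void main(String[] args) {
            // Multipliers and imbalances as reported in the functionCost line below.
            java.util.List<Cost> costs = java.util.List.of(
                new Cost("RegionCountSkewCostFunction", 500.0, 0.0),
                new Cost("MoveCostFunction", 7.0, 0.0),
                new Cost("TableSkewCostFunction", 35.0, 0.0),
                new Cost("ReadRequestCostFunction", 5.0, 0.0),
                new Cost("WriteRequestCostFunction", 5.0, 0.0));
            // threshold(1.0) as in the log above: prints false, i.e. balancing is skipped.
            System.out.println(needsBalance(costs, 1.0));
        }
    }

Under this reading, lowering hbase.master.balancer.stochastic.minCostNeedBalance (or raising the multiplier of a specific cost function) makes the check trip at smaller imbalances, which is exactly what the advisory in the log message suggests.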
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,152 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table41 2024-11-07T18:27:16,152 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,153 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,153 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,154 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,155 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,156 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,156 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,157 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,157 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,158 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,159 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,159 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,160 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,161 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,161 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,162 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,162 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table41) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
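The skip message above names hbase.master.balancer.stochastic.minCostNeedBalance and points at the per-cost-function multipliers (the functionCost breakdown that follows lists the multipliers in effect for this test run). A minimal Java sketch of how those settings could be adjusted through the HBase Configuration API is shown below; the minCostNeedBalance key is taken verbatim from the log message, while the regionCountCost key is an assumed property name for the RegionCountSkewCostFunction multiplier and should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "needs balance" threshold so smaller weighted-average imbalances
        // still produce a balance plan (the test above runs with threshold 1.0).
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // Alternatively, raise the relative weight of region-count skew.
        // Property name assumed, not taken from the log; verify per HBase version.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

        System.out.println("minCostNeedBalance = "
            + conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
    }
}

On a running cluster these values would normally be set in hbase-site.xml on the HMaster (and picked up on restart or balancer reconfiguration) rather than programmatically as sketched here.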
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,162 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table42 2024-11-07T18:27:16,162 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,163 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,163 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,164 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,165 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,165 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,166 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,167 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,167 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,168 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,169 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,169 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,170 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,171 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,172 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table42) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
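The long "server N is on host N" / "server N is on rack 0" runs above are the balancer building its cluster model for this test: 393 servers, one server per host, all on a single rack. The INFO entry above then reports why no plan is generated for table42: each cost function's imbalance (listed in the functionCost= detail that continues below) is aggregated, weighted by its multiplier, and compared against the threshold. The following is a minimal, self-contained Java sketch of that comparison using the multipliers printed in this log; the weighted-average aggregation is an interpretation of the logged numbers rather than the exact HBase implementation, and the hbase.master.balancer.stochastic.regionCountCost property named in the closing comment is assumed from the balancer documentation (only minCostNeedBalance appears verbatim in this log).

// Illustrative sketch only: reproduces the skip decision reported above from the
// logged (multiplier, imbalance) pairs. Cost functions marked "(not needed)" in the
// log are omitted. The real StochasticLoadBalancer computes this internally and may
// aggregate differently in a given HBase version.
public final class BalanceDecisionSketch {

  /** Weighted average of per-cost-function imbalances: sum(w_i * imb_i) / sum(w_i). */
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weighted = 0.0;
    double totalWeight = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalWeight += multipliers[i];
    }
    return totalWeight == 0.0 ? 0.0 : weighted / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers as printed in the functionCost= detail: RegionCountSkew, Move,
    // RackLocality, TableSkew, ReadRequest, WriteRequest, MemStoreSize, StoreFile.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    // All imbalances are 0.0 in this run, so any weighted average is 0.0.
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};

    // Threshold printed as "threshold(1.0)"; tuned via
    // hbase.master.balancer.stochastic.minCostNeedBalance (this test apparently runs
    // with 1.0; production defaults are much lower).
    double minCostNeedBalance = 1.0;

    double imbalance = weightedAverageImbalance(multipliers, imbalances);
    boolean skip = imbalance <= minCostNeedBalance;
    System.out.printf("weighted average imbalance=%.3f, threshold=%.3f, skip balancing=%b%n",
        imbalance, minCostNeedBalance, skip);

    // To get a plan generated, either lower the threshold or raise a specific
    // multiplier, e.g. hbase.master.balancer.stochastic.regionCountCost
    // (assumed property name, not taken from this log).
  }
}
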
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,172 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table43 2024-11-07T18:27:16,172 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,173 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,173 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,174 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,175 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,176 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,176 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,177 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,177 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,178 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,179 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,179 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,180 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,181 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,181 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,182 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,182 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table43) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,182 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table44 2024-11-07T18:27:16,182 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,183 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,183 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,184 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,185 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,185 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,186 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,187 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,187 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,188 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,188 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,189 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,190 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,191 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,191 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,192 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,192 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table44) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,192 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table45 2024-11-07T18:27:16,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,192 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,193 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,193 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,194 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,195 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,195 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,196 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,197 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,197 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,198 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,198 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,199 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,200 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,201 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table45) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
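The balancer message above points at two tuning knobs when more aggressive balancing is wanted: lowering hbase.master.balancer.stochastic.minCostNeedBalance below its default of 1.0, or raising the relative multiplier of a specific cost function. The sketch below (not part of this test run) shows what that could look like set programmatically; the property name for the RegionCountSkewCostFunction multiplier (hbase.master.balancer.stochastic.regionCountCost) and the concrete values are illustrative assumptions, not values taken from this log.

    // Minimal sketch, assuming the property names noted above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the "need balance" threshold from the 1.0 seen in the log so the
        // StochasticLoadBalancer acts on smaller weighted-average imbalances.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Assumed key: raise the region-count skew multiplier (500.0 in the log's
        // functionCost output) so that cost function carries more weight.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println("minCostNeedBalance = "
            + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
      }
    }

In a real deployment these properties would normally be set in hbase-site.xml on the HMaster rather than in code; the snippet only illustrates which settings the log message is referring to.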
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,201 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table35 2024-11-07T18:27:16,201 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,202 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,202 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,203 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,204 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,204 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,205 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,206 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,206 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,207 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,208 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,208 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,209 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,210 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,210 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,211 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,211 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table35) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
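[editor's note] The skip decision logged above reports a "weighted average imbalance" compared against hbase.master.balancer.stochastic.minCostNeedBalance, and the functionCost breakdown on the next log line lists each cost function's multiplier and current imbalance. As a reading aid only, here is a minimal, hypothetical Java sketch of how such a weighted-average check could be computed from those per-function values. It is not HBase's actual StochasticLoadBalancer implementation; the class and record names are made up, and "(not needed)" functions are simply omitted.

// Hypothetical illustration (not HBase source code) of a weighted-average
// imbalance check like the one reported in the log line above.
public class MinCostNeedBalanceCheck {

    // One entry per cost function, mirroring the functionCost breakdown:
    // a configured multiplier (weight) and the current imbalance in [0, 1].
    record CostEntry(String name, double multiplier, double imbalance) {}

    // Weighted average of the imbalances, weighted by each function's multiplier.
    static double weightedAverageImbalance(CostEntry[] entries) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (CostEntry e : entries) {
            weightedSum += e.multiplier() * e.imbalance();
            multiplierSum += e.multiplier();
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // Multipliers and imbalances copied from the functionCost line that follows.
        CostEntry[] entries = {
            new CostEntry("RegionCountSkewCostFunction", 500.0, 0.0),
            new CostEntry("MoveCostFunction", 7.0, 0.0),
            new CostEntry("RackLocalityCostFunction", 15.0, 0.0),
            new CostEntry("TableSkewCostFunction", 35.0, 0.0),
            new CostEntry("ReadRequestCostFunction", 5.0, 0.0),
            new CostEntry("WriteRequestCostFunction", 5.0, 0.0),
            new CostEntry("MemStoreSizeCostFunction", 5.0, 0.0),
            new CostEntry("StoreFileCostFunction", 5.0, 0.0),
        };
        double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
        double imbalance = weightedAverageImbalance(entries);
        if (imbalance <= minCostNeedBalance) {
            // All imbalances are 0.0 here, so the balancer skips this table,
            // matching the "skipping load balancing" message above.
            System.out.printf("skipping load balancing: weighted average imbalance=%.1f <= threshold(%.1f)%n",
                    imbalance, minCostNeedBalance);
        } else {
            System.out.println("imbalance above threshold, generating a balance plan");
        }
    }
}

Under this reading, lowering minCostNeedBalance or raising an individual multiplier (as the log message suggests) makes the check more likely to trigger a balance plan for that table.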
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,211 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table36 2024-11-07T18:27:16,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,211 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,212 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,212 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,213 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,214 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,214 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,215 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,216 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,216 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,217 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,217 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,218 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,219 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,220 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,220 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table36) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,220 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table37 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,221 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,221 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
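The StochasticLoadBalancer(421) INFO record a few lines above explains why no plan was produced for table36: the weighted average imbalance of 0.0 did not exceed the hbase.master.balancer.stochastic.minCostNeedBalance threshold of 1.0, and the message itself points at the two tuning knobs (lower that threshold, or raise the multiplier of a specific cost function). A minimal Java sketch of that tuning is shown below; it only sets configuration keys and does not reproduce this test run, and the 0.05 and 1000.0 values (and the regionCountCost key name) are illustrative assumptions rather than values taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    // Load the standard HBase configuration (hbase-default.xml plus any hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // Lower the "needs balance" threshold so the balancer generates plans for smaller imbalances
    // than the threshold(1.0) reported in the log. 0.05 is an assumed example value.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Or raise the relative multiplier of a specific cost function, e.g. region-count skew
    // (logged above as RegionCountSkewCostFunction multiplier=500.0). The key name here is the one
    // commonly used for that cost function; verify it against your HBase version.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

    // Echo the effective threshold to confirm the override took effect.
    System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

In a real deployment these keys would normally be set in hbase-site.xml on the HMaster rather than programmatically.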
2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,222 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,223 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,224 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,224 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,225 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,225 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,226 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,227 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,227 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,228 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,229 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,229 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,230 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,230 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,230 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table37) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
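The INFO message above names the two knobs that control how eager the StochasticLoadBalancer is: the hbase.master.balancer.stochastic.minCostNeedBalance threshold it compares the weighted average imbalance against, and the per-cost-function multipliers listed in the functionCost breakdown that follows. A minimal Java sketch of lowering that threshold on an HBase Configuration is shown below; only the minCostNeedBalance key is taken from this log, and the regionCountCost key is an assumed name for the RegionCountSkewCostFunction multiplier, so treat both values as illustrative rather than recommended settings.

// Hypothetical sketch, not part of the test output above: make the balancer more
// aggressive by lowering its "needs balance" threshold before it evaluates a table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The log compares weighted average imbalance against threshold(1.0);
    // lowering this value means smaller imbalances still produce a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Alternatively raise one cost function's relative weight (assumed key;
    // the log reports RegionCountSkewCostFunction with multiplier=500.0).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

With a setting like this, a run such as the one above would compute the same weighted average imbalance but compare it against 0.05 instead of 1.0 before deciding whether to generate a plan.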
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,230 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table38 2024-11-07T18:27:16,230 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,231 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,231 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,232 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,233 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,233 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,234 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,235 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,235 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,236 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,236 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,237 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,238 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,239 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,239 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,239 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table38) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,239 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table39 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,240 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,240 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,241 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,242 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,243 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,243 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,244 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,244 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,245 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,245 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,246 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,247 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,248 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,248 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table39) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,248 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table5 2024-11-07T18:27:16,248 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,249 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,249 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
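The StochasticLoadBalancer message earlier in this log names hbase.master.balancer.stochastic.minCostNeedBalance as the threshold that decides whether per-table balancing is skipped (here the weighted average imbalance 0.0 is compared against a configured threshold of 1.0). As a minimal, hypothetical sketch of acting on that advice, and not something taken from this log, the property can be lowered on a standard Hadoop/HBase Configuration before a master (or a test) constructs its balancer; the class names and property key below are the stock HBase ones, while the chosen value 0.05f is only an illustrative assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerThresholdSketch {
      public static void main(String[] args) {
        // Start from the usual HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();

        // The log above reports threshold(1.0), which makes the balancer skip
        // whenever the weighted average imbalance is <= 1.0. Lowering the
        // threshold (value here is an illustrative assumption) makes balancing
        // run for much smaller imbalances, as the log message suggests.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

        // A master started with this configuration would pick the value up when
        // it creates its StochasticLoadBalancer; printing it just confirms the
        // override took effect in this sketch.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

The same message offers a second lever, raising the relative multiplier of a specific cost function; the functionCost listing above shows the multipliers in effect for this test run (e.g. RegionCountSkewCostFunction at 500.0, MoveCostFunction at 7.0).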
2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,250 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,251 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,251 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,252 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,253 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,253 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,254 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,254 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,255 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,256 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,257 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table5) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,257 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table4 2024-11-07T18:27:16,257 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,258 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,258 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
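[Note on the StochasticLoadBalancer message above] The INFO line for table5 reports that balancing is skipped because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and it lists each cost function's multiplier and imbalance in the functionCost= summary. The following is a minimal, hypothetical Java sketch of that decision, not the actual HBase implementation; all class and method names here are illustrative assumptions, and the multipliers are copied from the functionCost= line above.

import java.util.List;

/**
 * Illustrative sketch (not HBase code) of the "minCostNeedBalance" check the
 * log message above describes: per-cost-function imbalances are combined into
 * a weighted average and compared against a threshold before any balance plan
 * is generated.
 */
public class MinCostNeedBalanceSketch {

  /** A cost function's multiplier (weight) and its current imbalance in [0, 1]. */
  record CostFunction(String name, double multiplier, double imbalance) {}

  /** Weighted average imbalance; 0.0 in the log above, so balancing is skipped. */
  static double weightedAverageImbalance(List<CostFunction> costs) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (CostFunction c : costs) {
      weightedSum += c.multiplier() * c.imbalance();
      totalWeight += c.multiplier();
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // Multipliers taken from the functionCost= line above; imbalances are all 0.0 there.
    List<CostFunction> costs = List.of(
        new CostFunction("RegionCountSkewCostFunction", 500.0, 0.0),
        new CostFunction("MoveCostFunction", 7.0, 0.0),
        new CostFunction("RackLocalityCostFunction", 15.0, 0.0),
        new CostFunction("TableSkewCostFunction", 35.0, 0.0),
        new CostFunction("ReadRequestCostFunction", 5.0, 0.0),
        new CostFunction("WriteRequestCostFunction", 5.0, 0.0),
        new CostFunction("MemStoreSizeCostFunction", 5.0, 0.0),
        new CostFunction("StoreFileCostFunction", 5.0, 0.0));

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double imbalance = weightedAverageImbalance(costs);

    if (imbalance <= minCostNeedBalance) {
      System.out.println("skipping load balancing: imbalance=" + imbalance
          + " <= threshold(" + minCostNeedBalance + ")");
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}

As the log message itself suggests, more aggressive balancing comes from lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 or raising the multiplier of the cost function you care about.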
2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,259 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,260 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,260 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,261 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,262 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,262 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,263 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,263 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,264 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,265 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,266 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,266 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,266 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table4) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,266 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table3 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,267 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,267 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
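The StochasticLoadBalancer(421) message above reports why balancing for table4 was skipped: the weighted average imbalance (0.0) did not exceed the threshold taken from hbase.master.balancer.stochastic.minCostNeedBalance (1.0 in this test). The sketch below is illustrative only, not the actual HBase implementation; it assumes the reported figure is simply the multiplier-weighted mean of the per-function imbalances listed in the functionCost line, with functions marked "(not needed)" excluded. The class name WeightedImbalanceSketch is hypothetical.

    // Illustrative sketch (not HBase source). Assumes "weighted average imbalance"
    // is the multiplier-weighted mean of the imbalances in the functionCost line above.
    public class WeightedImbalanceSketch {
        public static void main(String[] args) {
            // {multiplier, imbalance} pairs copied from the functionCost line for table4
            double[][] costs = {
                {500.0, 0.0}, // RegionCountSkewCostFunction
                {7.0,   0.0}, // MoveCostFunction
                {15.0,  0.0}, // RackLocalityCostFunction
                {35.0,  0.0}, // TableSkewCostFunction
                {5.0,   0.0}, // ReadRequestCostFunction
                {5.0,   0.0}, // WriteRequestCostFunction
                {5.0,   0.0}, // MemStoreSizeCostFunction
                {5.0,   0.0}  // StoreFileCostFunction
            };
            double weightedSum = 0.0;
            double totalMultiplier = 0.0;
            for (double[] c : costs) {
                weightedSum += c[0] * c[1];
                totalMultiplier += c[0];
            }
            double weightedAverageImbalance = weightedSum / totalMultiplier; // 0.0 here
            double minCostNeedBalance = 1.0; // threshold(1.0) reported in the log line
            System.out.printf("weighted average imbalance=%.1f, threshold=%.1f, balance needed=%b%n",
                weightedAverageImbalance, minCostNeedBalance,
                weightedAverageImbalance > minCostNeedBalance);
        }
    }

Per the message itself, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a specific cost function's multiplier makes the balancer act on smaller imbalances; with every imbalance at 0.0 in this run, no plan would be generated for table4 regardless of the threshold.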
2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,268 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,269 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,270 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
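The surrounding BalancerClusterState(303) lines map each of the 393 servers for table3 onto its own host, and the earlier (314) lines put every server on rack 0, which is what produces the "number of hosts=393, number of racks=1" summary. The sketch below only reproduces that counting; ClusterShapeSketch is a hypothetical name and the grouping maps are not the data structures HBase actually uses.

    // Hypothetical sketch: rebuilds the cluster shape described by these log lines --
    // 393 servers, one per host, all on a single rack -- and checks the summary counts.
    import java.util.*;

    public class ClusterShapeSketch {
        public static void main(String[] args) {
            int servers = 393;
            Map<Integer, List<Integer>> serversPerHost = new HashMap<>();
            Map<Integer, List<Integer>> serversPerRack = new HashMap<>();
            for (int s = 0; s < servers; s++) {
                serversPerHost.computeIfAbsent(s, h -> new ArrayList<>()).add(s); // "server s is on host s"
                serversPerRack.computeIfAbsent(0, r -> new ArrayList<>()).add(s); // "server s is on rack 0"
            }
            // Expected: hosts=393, racks=1, matching the BalancerClusterState(319) summary
            System.out.println("hosts=" + serversPerHost.size() + ", racks=" + serversPerRack.size());
        }
    }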
2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,270 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,271 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,271 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,272 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,273 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,273 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,274 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,275 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,275 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,276 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,276 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table3) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,276 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table2 2024-11-07T18:27:16,276 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,277 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,277 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,278 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,279 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,279 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,280 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,280 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,280 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,281 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,282 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,282 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,283 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,284 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,284 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,284 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,285 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table2) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
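The decision above skips balancing for table2 because the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0); the functionCost breakdown that the same record prints next lists each cost function's multiplier and imbalance. Below is a minimal, hypothetical Java sketch of how such a weighted average could be combined and compared against the threshold; it is not HBase's actual StochasticLoadBalancer code, and the class and method names are made up for illustration. The multiplier/imbalance values are copied from the functionCost line that follows.

```java
// Hypothetical sketch (not HBase's implementation): combine per-cost-function
// imbalances into a weighted average and compare it against
// hbase.master.balancer.stochastic.minCostNeedBalance, mirroring the decision
// logged above for table2 (imbalance 0.0 <= threshold 1.0 => skip balancing).
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {

  /** Weighted average of imbalances; a null entry models "(not needed)" in the log. */
  static double weightedAverageImbalance(Map<String, double[]> costs) {
    double weightedSum = 0.0;
    double totalWeight = 0.0;
    for (double[] multiplierAndImbalance : costs.values()) {
      if (multiplierAndImbalance == null) {
        continue; // this cost function does not contribute
      }
      weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
      totalWeight += multiplierAndImbalance[0];
    }
    return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
  }

  public static void main(String[] args) {
    // {multiplier, imbalance} pairs taken from the functionCost line below.
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
    costs.put("PrimaryRegionCountSkewCostFunction", null);
    costs.put("MoveCostFunction", new double[] { 7.0, 0.0 });
    costs.put("RackLocalityCostFunction", new double[] { 15.0, 0.0 });
    costs.put("TableSkewCostFunction", new double[] { 35.0, 0.0 });
    costs.put("RegionReplicaHostCostFunction", null);
    costs.put("RegionReplicaRackCostFunction", null);
    costs.put("ReadRequestCostFunction", new double[] { 5.0, 0.0 });
    costs.put("WriteRequestCostFunction", new double[] { 5.0, 0.0 });
    costs.put("MemStoreSizeCostFunction", new double[] { 5.0, 0.0 });
    costs.put("StoreFileCostFunction", new double[] { 5.0, 0.0 });

    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance
    double imbalance = weightedAverageImbalance(costs);

    if (imbalance <= minCostNeedBalance) {
      System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
          imbalance, minCostNeedBalance);
    } else {
      System.out.println("would generate a balance plan");
    }
  }
}
```

Under these assumptions, lowering minCostNeedBalance or raising an individual multiplier makes the comparison more likely to trigger a balance plan, which is what the log message suggests for more aggressive balancing.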
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,285 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table1 2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,285 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,286 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85
2024-11-07T18:27:16,286 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86
... [the same DEBUG entry repeats once per server, "server N is on host N", for servers 87 through 391] ...
2024-11-07T18:27:16,289 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-07T18:27:16,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
2024-11-07T18:27:16,289 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0
... [the same INFO entry repeats once per server, "server N is on rack 0", for servers 2 through 348] ...
2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0
2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,293 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,293 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,294 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,294 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table1) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
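The balancer message above points at two tuning knobs for the stochastic balancer: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (reported as 1.0 here) and the per-cost-function multipliers listed in the functionCost dump that follows. Below is a minimal sketch of how either could be overridden through the Hadoop Configuration API, assuming hbase-common is on the classpath; the minCostNeedBalance key is taken from the log line itself, while the regionCountCost key and the concrete values are illustrative assumptions, not settings used by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static void main(String[] args) {
            // Start from the standard HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();

            // The log reports threshold(1.0); lowering it makes the stochastic balancer
            // act on smaller weighted-average imbalances. 0.05 is an arbitrary example value.
            conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

            // Alternative knob mentioned in the message: raise the multiplier of a specific
            // cost function. The key below is assumed to correspond to
            // RegionCountSkewCostFunction (whose multiplier=500.0 in the functionCost dump
            // matches the usual default); treat it as a placeholder to verify against your
            // HBase version before use.
            conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);

            // Print the effective threshold to confirm the override took effect.
            System.out.println(conf.getFloat(
                "hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
        }
    }

In practice these properties would be set in hbase-site.xml on the HMaster rather than programmatically; the sketch only illustrates which keys the log message is referring to.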
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,294 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table0 2024-11-07T18:27:16,294 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,295 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,295 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,296 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,297 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,297 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,298 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,298 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,299 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,300 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,300 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,301 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,302 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,302 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,302 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,303 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table0) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
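The INFO message above is the actionable part of this stretch of the log: balancing for table0 was skipped because the weighted average imbalance (0.0) did not exceed the 1.0 threshold, and the message names the configuration key to lower if more aggressive balancing is wanted; the per-cost-function breakdown follows directly below. A minimal sketch of adjusting that knob, assuming the standard Hadoop/HBase Configuration API; the 0.05 value and the regionCountCost multiplier key are illustrative assumptions, not taken from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
    public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-site.xml on the classpath).
        Configuration conf = HBaseConfiguration.create();
        // Lower the "needs balance" threshold named in the log message so that
        // imbalances smaller than 1.0 still trigger a balancing run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternative route the message mentions: raise the multiplier of a specific
        // cost function (key name assumed here purely for illustration).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        // Print the effective threshold to confirm the override took effect.
        System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
    }
}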
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,303 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table30 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,303 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,304 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
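
For readers following the BalancerClusterState records above: the balancer assigns every server a numeric host index, and in this single-rack test each server lands on its own host, which is why the log reads "server N is on host N". Below is a minimal, illustrative Java sketch of that kind of bookkeeping. The class and method names are hypothetical; this is not the actual org.apache.hadoop.hbase.master.balancer implementation, only a sketch under the assumption that hosts are numbered in first-seen order.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative only: maps each server to a host index, mirroring the
    // "server N is on host N" records logged by BalancerClusterState.
    public class HostIndexSketch {
        public static int[] buildServerToHostIndex(List<String> serverHostnames) {
            Map<String, Integer> hostToIndex = new HashMap<>();
            int[] serverToHost = new int[serverHostnames.size()];
            for (int serverIndex = 0; serverIndex < serverHostnames.size(); serverIndex++) {
                String host = serverHostnames.get(serverIndex);
                Integer hostIndex = hostToIndex.get(host);
                if (hostIndex == null) {
                    // First time this host is seen: give it the next free index.
                    hostIndex = hostToIndex.size();
                    hostToIndex.put(host, hostIndex);
                }
                serverToHost[serverIndex] = hostIndex;
            }
            return serverToHost;
        }

        public static void main(String[] args) {
            // In this test every server name is unique, so server i maps to host i.
            List<String> servers = new ArrayList<>();
            for (int i = 0; i < 393; i++) {
                servers.add("srv" + i);
            }
            int[] serverToHost = buildServerToHostIndex(servers);
            System.out.println("server 116 is on host " + serverToHost[116]); // prints 116
        }
    }
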
2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,304 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,305 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,306 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
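
Separately, the functionCost record at the top of this section lists each cost function with a multiplier and a current imbalance (for example RegionCountSkewCostFunction at 500.0 and MoveCostFunction at 7.0, all with imbalance 0.0). As a rough illustration of how such figures could be combined into one score, a weighted sum is sketched below. The method name is hypothetical and the hard-coded values are copied from that log line purely for illustration; this is not the actual StochasticLoadBalancer code.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Illustrative only: combines per-cost-function {multiplier, imbalance}
    // pairs into a single weighted total, in the spirit of the functionCost
    // record above. Not the real StochasticLoadBalancer implementation.
    public class WeightedCostSketch {
        public static double weightedTotal(Map<String, double[]> costs) {
            double total = 0.0;
            for (double[] multiplierAndImbalance : costs.values()) {
                total += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            }
            return total;
        }

        public static void main(String[] args) {
            Map<String, double[]> costs = new LinkedHashMap<>();
            // {multiplier, imbalance} pairs taken from the functionCost log line.
            costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
            costs.put("MoveCostFunction", new double[] {7.0, 0.0});
            costs.put("RackLocalityCostFunction", new double[] {15.0, 0.0});
            costs.put("TableSkewCostFunction", new double[] {35.0, 0.0});
            costs.put("ReadRequestCostFunction", new double[] {5.0, 0.0});
            // All imbalances are 0.0 in this log, so the weighted total here is 0.0.
            System.out.println("weighted total = " + weightedTotal(costs));
        }
    }
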
2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,306 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,307 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,307 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,308 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,309 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,309 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,310 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,311 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,311 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,312 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table30) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
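The StochasticLoadBalancer message above points at two tuning knobs for more aggressive balancing: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (running at 1.0 in this test) and the per-cost-function multipliers listed in the functionCost line below (e.g. RegionCountSkewCostFunction at 500.0). A minimal sketch, assuming these keys would normally be set in hbase-site.xml on the HMaster rather than programmatically; the class and values here are only illustrative, not part of the test:

// Minimal sketch (not from this log): tuning StochasticLoadBalancer thresholds.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lowering the threshold makes the balancer act on smaller
        // weighted-average imbalances than the 1.0 used in this test run.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, raise a specific cost function's relative weight,
        // e.g. the region-count skew cost (500.0 in the functionCost line below).
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(
            conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
    }
}

In practice the same two properties placed in hbase-site.xml take effect after restarting the master (or reloading the balancer configuration), which is how this behavior would be adjusted outside a unit test.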
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,312 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table9 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,312 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,313 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,313 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,314 .. 2024-11-07T18:27:16,316 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server N is on host N, for each server N from 117 through 392 (every server reported on its own host)
2024-11-07T18:27:16,316 .. 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server N is on rack 0, for each server N from 0 through 380 (every server reported on rack 0)
2024-11-07T18:27:16,320 INFO [Time-limited test {}]
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,320 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,320 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,321 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table9) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
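The INFO entry above from StochasticLoadBalancer records why no balance plan is produced for table9: the weighted average imbalance across the active cost functions (their multipliers and per-function imbalances appear in the functionCost breakdown that follows) is 0.0, which does not exceed the default minCostNeedBalance threshold of 1.0. The sketch below is a minimal, assumed reconstruction of that check, built only from the multipliers and imbalances reported in this log; it is not HBase's actual StochasticLoadBalancer implementation. As the message itself suggests, more aggressive balancing would come from lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising the multiplier of a specific cost function.

// Minimal sketch (assumed, not HBase source) of the skip decision reported above:
// a table is skipped when the weighted average imbalance of its cost functions
// does not exceed hbase.master.balancer.stochastic.minCostNeedBalance.
// Multipliers/imbalances are copied from the functionCost line for table9;
// functions reported as "(not needed)" are simply omitted from the average.
public class NeedsBalanceSketch {

    static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
    }

    public static void main(String[] args) {
        // RegionCountSkew, Move, RackLocality, TableSkew, ReadRequest, WriteRequest,
        // MemStoreSize, StoreFile -- values taken from the functionCost breakdown below.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {0.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0};

        double threshold = 1.0; // default hbase.master.balancer.stochastic.minCostNeedBalance
        double avg = weightedAverageImbalance(multipliers, imbalances);

        if (avg <= threshold) {
            System.out.printf("skipping load balancing because weighted average imbalance=%.1f"
                    + " <= threshold(%.1f)%n", avg, threshold);
        } else {
            System.out.println("imbalance above threshold, a balance plan would be generated");
        }
    }
}

With every per-function imbalance at 0.0 the weighted average is trivially 0.0, matching the log, so the balancer moves on to the next table (table31 below) without generating moves.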
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,321 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table31 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,321 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,321 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,322 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,323 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,324 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,324 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,325 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,325 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,326 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,326 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,327 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,328 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,329 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,329 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,330 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,330 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table31) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
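[editor's note] The INFO record above explains why balancing was skipped for table31 and points at two tuning knobs: hbase.master.balancer.stochastic.minCostNeedBalance and the per-cost-function multipliers (the functionCost breakdown for this record continues below). The following is a minimal sketch, not taken from this log, of how such a configuration might be tuned programmatically. Only minCostNeedBalance is named in the log itself; the multiplier property names are assumptions inferred from the defaults printed in the functionCost summary (regionCount=500, move=7, tableSkew=35).

```java
// Hypothetical tuning sketch for the thresholds mentioned in the log record above.
// Key names other than minCostNeedBalance are assumptions, not confirmed by this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedBalancerConf() {
    Configuration conf = HBaseConfiguration.create();
    // Allow balancing even when the weighted average imbalance is small
    // (the log shows the default threshold of 1.0 causing the skip).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Assumed multiplier keys; raising them makes the corresponding cost
    // function weigh more heavily in the stochastic search.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 70f);
    return conf;
  }
}
```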
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,330 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table8 2024-11-07T18:27:16,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,330 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,331 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,331 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,332 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,333 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,333 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,334 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,334 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,335 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,336 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,336 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,337 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,338 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,338 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table8) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,339 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table32 2024-11-07T18:27:16,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,340 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,341 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,342 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,342 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,343 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,343 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,344 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,345 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,345 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,346 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,347 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,347 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,348 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table32) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,348 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table7 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,348 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,349 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
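[editorial note, not part of the captured test output] The StochasticLoadBalancer entry earlier in this log ("skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0)") itself points at the tuning knob involved: hbase.master.balancer.stochastic.minCostNeedBalance, reported as 1.0 for this run. The following is only a minimal illustrative sketch of lowering that property programmatically via the standard Hadoop/HBase Configuration API; the class name and the 0.05f value are arbitrary choices for illustration, not anything taken from this log, and in a real deployment the property would normally be set in hbase-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerThresholdSketch {
  public static void main(String[] args) {
    // Load the usual HBase configuration (hbase-default.xml / hbase-site.xml on the classpath).
    Configuration conf = HBaseConfiguration.create();

    // The log above reports threshold(1.0) for this test run; lowering the value
    // makes the stochastic balancer act on smaller weighted-average imbalances.
    // 0.05f here is purely an example value.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);

    // Read the value back to confirm what the balancer would see.
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}

The same log entry also mentions the alternative of raising the relative multiplier of a specific cost function (the functionCost breakdown lists them, e.g. RegionCountSkewCostFunction at multiplier=500.0); those multipliers are likewise plain configuration properties rather than code changes.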
2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,349 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,350 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,351 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,351 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,352 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,352 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,353 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,353 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,354 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,355 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,356 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,356 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table7) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
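The StochasticLoadBalancer message above explains why balancing was skipped for table7: the weighted average imbalance (0.0) did not exceed the minCostNeedBalance threshold (1.0), and the message itself suggests either lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost-function multiplier for more aggressive balancing. The following is a minimal Java sketch of adjusting those settings; the minCostNeedBalance key is taken from the log message, while the regionCountCost key and the 0.025 value are assumptions that should be verified against the HBase version in use (in a real deployment these would normally be set in hbase-site.xml on the master rather than in code).

// Minimal sketch, not part of the test log: tune the stochastic balancer threshold
// referenced in the message above. Property names other than minCostNeedBalance are
// assumptions; check them against your HBase release.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the "needs balancing" threshold so smaller imbalances trigger a plan
    // (0.025 is an illustrative value, not taken from this log).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);
    // Alternatively, weight region-count skew more heavily (assumed property name).
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    System.out.println("minCostNeedBalance = "
        + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 1.0f));
  }
}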
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,356 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table33 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,357 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,357 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,358 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,359 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,359 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,360 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,361 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,361 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,362 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,362 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,363 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,364 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,365 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,365 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table33) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
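The StochasticLoadBalancer message above explains why balancing is skipped for table33: the weighted average imbalance (0.0) does not exceed hbase.master.balancer.stochastic.minCostNeedBalance (1.0). The sketch below is a minimal, hypothetical illustration (not HBase source code) of that comparison, assuming the reported "weighted average imbalance" is the multiplier-weighted mean of the per-cost-function imbalance values printed in the functionCost= line that follows; the class and variable names are invented for illustration only.

```java
// Hypothetical sketch of the threshold check described in the log message above.
// Assumption: weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
public class BalanceThresholdSketch {
    public static void main(String[] args) {
        // Multipliers and imbalances as printed in the functionCost= line for table33.
        double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
        double[] imbalances  = {  0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0, 0.0};

        double weightedSum = 0.0;
        double totalWeight = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
            weightedSum += multipliers[i] * imbalances[i];
            totalWeight += multipliers[i];
        }
        double weightedAverageImbalance = weightedSum / totalWeight; // 0.0 for this table

        // hbase.master.balancer.stochastic.minCostNeedBalance (default 1.0, per the log).
        double minCostNeedBalance = 1.0;
        if (weightedAverageImbalance <= minCostNeedBalance) {
            System.out.println("skipping load balancing because weighted average imbalance="
                + weightedAverageImbalance + " <= threshold(" + minCostNeedBalance + ")");
        }
    }
}
```

As the message itself suggests, more aggressive balancing would come from lowering hbase.master.balancer.stochastic.minCostNeedBalance below 1.0 or increasing the multiplier of the cost function(s) of interest in the cluster configuration.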
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,365 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table6 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,366 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,366 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,367 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,368 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,369 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,369 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,370 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,370 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,371 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,371 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,372 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,373 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,374 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,374 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table6) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,374 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table34 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,375 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,375 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
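
The StochasticLoadBalancer INFO message above (for table6) reports a "weighted average imbalance" that is compared against hbase.master.balancer.stochastic.minCostNeedBalance, and lists each cost function's multiplier and imbalance. Below is a minimal, hypothetical sketch of that comparison, assuming the weighted average is simply the multiplier-weighted mean of the per-function imbalances; this is not HBase's actual implementation, only an illustration of why a run with all imbalances at 0.0 is skipped against the default threshold of 1.0. Functions reported as "(not needed)" in the log are omitted.

// WeightedImbalanceSketch.java -- hypothetical illustration, not HBase source code
import java.util.LinkedHashMap;
import java.util.Map;

public class WeightedImbalanceSketch {
    public static void main(String[] args) {
        // multiplier / imbalance pairs copied from the functionCost line above
        Map<String, double[]> functions = new LinkedHashMap<>();
        functions.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        functions.put("MoveCostFunction",            new double[] {7.0,   0.0});
        functions.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        functions.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        functions.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        functions.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        functions.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        functions.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // default threshold named in the log message
        double minCostNeedBalance = 1.0;

        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] mi : functions.values()) {
            weightedSum   += mi[0] * mi[1]; // multiplier * imbalance
            multiplierSum += mi[0];
        }
        double weightedAvgImbalance = (multiplierSum == 0.0) ? 0.0 : weightedSum / multiplierSum;

        // Balancing is skipped when the weighted average does not exceed the
        // threshold, which is why this table's run is skipped (0.0 <= 1.0).
        boolean needsBalance = weightedAvgImbalance > minCostNeedBalance;
        System.out.printf("weighted average imbalance=%.3f, threshold=%.1f, balance=%b%n",
            weightedAvgImbalance, minCostNeedBalance, needsBalance);
    }
}

Under this reading, lowering minCostNeedBalance or raising an individual multiplier (as the log message suggests) both make needsBalance flip to true sooner for the same per-function imbalances.
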
2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,376 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,377 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,378 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,378 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,379 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,379 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,380 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,380 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,381 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,382 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,383 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,383 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,383 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table34) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,384 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table24 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,384 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,384 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,385 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54
[repetitive DEBUG balancer.BalancerClusterState(303) entries, one per server, "server N is on host N" for servers 55 through 391, timestamps 2024-11-07T18:27:16,385 to 18:27:16,388]
2024-11-07T18:27:16,388 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392
2024-11-07T18:27:16,388 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0
[repetitive INFO balancer.BalancerClusterState(314) entries, one per server, "server N is on rack 0" for servers 1 through 285, timestamps 2024-11-07T18:27:16,388 to 18:27:16,391]
2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0
2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,391 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,392 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,392 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,392 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table24) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,392 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table25 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,393 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,393 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,394 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,395 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,396 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,396 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,397 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,397 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,398 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,398 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,399 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,400 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,401 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,401 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table25) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,401 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table26 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,402 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,403 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,403 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,404 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,405 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,405 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,406 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,406 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,406 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0
[... identical INFO entries from balancer.BalancerClusterState(314) for servers 2 through 391, each reported on rack 0 ...]
2024-11-07T18:27:16,410 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,410 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,411 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,411 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table26) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,411 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table27 2024-11-07T18:27:16,411 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0}
2024-11-07T18:27:16,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0
[... identical DEBUG entries from balancer.BalancerClusterState(303) for servers 1 through 21, each on the host with the same index ...]
2024-11-07T18:27:16,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22
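The table26 entry above shows the check that decides whether a balance plan is generated at all: every cost function reports an imbalance, those imbalances are combined using the relative multipliers listed in the functionCost breakdown, and balancing is skipped while the combined value stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here). Below is a minimal, self-contained sketch of that check; it is not the HBase implementation, the class and method names are illustrative only, and the multipliers, imbalances, and threshold are simply copied from the log entry above. It reads the "weighted average imbalance" wording as a multiplier-weighted average, which is one plausible interpretation of the message.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class MinCostNeedBalanceSketch {

    /**
     * Multiplier-weighted average of per-cost-function imbalances,
     * one plausible reading of the "weighted average imbalance" in the log.
     */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double multiplierSum = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            multiplierSum += multiplierAndImbalance[0];
        }
        return multiplierSum == 0.0 ? 0.0 : weightedSum / multiplierSum;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost breakdown above
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.0 });
        costs.put("MoveCostFunction", new double[] { 7.0, 0.0 });
        costs.put("RackLocalityCostFunction", new double[] { 15.0, 0.0 });
        costs.put("TableSkewCostFunction", new double[] { 35.0, 0.0 });
        costs.put("ReadRequestCostFunction", new double[] { 5.0, 0.0 });
        costs.put("WriteRequestCostFunction", new double[] { 5.0, 0.0 });
        costs.put("MemStoreSizeCostFunction", new double[] { 5.0, 0.0 });
        costs.put("StoreFileCostFunction", new double[] { 5.0, 0.0 });

        // Threshold named in the log message: hbase.master.balancer.stochastic.minCostNeedBalance
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(costs);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

With every imbalance at 0.0 the sketch prints the same decision as the log; following the log's own advice, either lowering minCostNeedBalance or raising a specific cost function's multiplier is what changes the outcome of this comparison.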
2024-11-07T18:27:16,412 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23
[... identical DEBUG entries from balancer.BalancerClusterState(303) for servers 24 through 176, each on the host with the same index ...]
2024-11-07T18:27:16,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177
2024-11-07T18:27:16,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303):
server 178 is on host 178 2024-11-07T18:27:16,413 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,414 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,414 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,415 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,416 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,416 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,417 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,417 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,418 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
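
The run of balancer.BalancerClusterState(314) lines above (and continuing below) records the balancer building its index-based topology model: each server gets a dense integer index and is mapped to a host index and a rack index, and in this test every server lands on rack 0. A minimal sketch of that kind of server-to-rack index mapping, using invented class and field names rather than the real BalancerClusterState internals, could look like:

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: names are invented for this aside and do not
// mirror the actual BalancerClusterState fields.
public class ToyClusterState {
  public static void main(String[] args) {
    // Hypothetical topology in the spirit of the log above: a handful of
    // servers, each on its own host, all hosts in a single rack.
    Map<String, String> serverToRack = new LinkedHashMap<>();
    for (int i = 0; i < 5; i++) {
      serverToRack.put("srv" + i, "rack");
    }

    // Assign dense integer indexes so cost functions can operate on flat arrays.
    Map<String, Integer> rackIndexes = new LinkedHashMap<>();
    int[] serverIndexToRackIndex = new int[serverToRack.size()];
    int serverIndex = 0;
    for (Map.Entry<String, String> entry : serverToRack.entrySet()) {
      Integer rackIndex = rackIndexes.get(entry.getValue());
      if (rackIndex == null) {
        rackIndex = rackIndexes.size();
        rackIndexes.put(entry.getValue(), rackIndex);
      }
      serverIndexToRackIndex[serverIndex] = rackIndex;
      System.out.println("server " + serverIndex + " is on rack " + rackIndex);
      serverIndex++;
    }
    System.out.println("number of racks=" + rackIndexes.size());
  }
}

The same indexing idea applies to the "server N is on host N" DEBUG lines later in the log: with 393 servers on 393 distinct hosts, the host map is simply the identity mapping.
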
2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,419 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,420 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table27) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
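
The StochasticLoadBalancer(421) message above reports a weighted average imbalance of 0.0 against a threshold of 1.0, driven by the per-cost-function multipliers and imbalances listed in the functionCost breakdown that follows. A minimal sketch of such a threshold check, assuming the weighted average is simply sum(multiplier * imbalance) / sum(multiplier) and using invented class and method names rather than the actual StochasticLoadBalancer code, could look like:

// Illustrative sketch only: one plausible reading of the "weighted average
// imbalance <= threshold" decision logged above. The averaging formula and
// all names here are assumptions, not the real implementation.
public class ImbalanceCheckSketch {
  static double weightedAverageImbalance(double[] multipliers, double[] imbalances) {
    double weightedSum = 0.0;
    double weightTotal = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weightedSum += multipliers[i] * imbalances[i];
      weightTotal += multipliers[i];
    }
    return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
  }

  public static void main(String[] args) {
    // Multipliers as listed in the functionCost breakdown below; for table27
    // every per-function imbalance is 0.0.
    double[] multipliers = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] imbalances  = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double minCostNeedBalance = 1.0; // hbase.master.balancer.stochastic.minCostNeedBalance

    double avg = weightedAverageImbalance(multipliers, imbalances);
    if (avg <= minCostNeedBalance) {
      System.out.println("skipping load balancing: imbalance=" + avg
          + " <= threshold(" + minCostNeedBalance + ")");
    }
  }
}

As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance or raising a cost function's multiplier makes the skip condition harder to satisfy, so the balancer would go on to generate a balance plan instead of skipping.
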
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,420 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table28 2024-11-07T18:27:16,420 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,421 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,421 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,422 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,423 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,424 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,424 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,425 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,425 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,426 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,426 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,427 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,428 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,429 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,429 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,430 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table28) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,430 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table29 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,430 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,431 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,431 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
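The StochasticLoadBalancer message logged above for table28 points at two tuning knobs: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run) and the per-cost-function multipliers listed under functionCost (RegionCountSkewCostFunction=500.0, MoveCostFunction=7.0, and so on). The following is a minimal sketch, not part of the test output, of how those knobs could be set programmatically; the 0.025 threshold and the 1000.0 multiplier are arbitrary illustrative values, and the multiplier key hbase.master.balancer.stochastic.regionCountCost is assumed from the cost-function name rather than taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        // Start from the default HBase configuration (hbase-default.xml + hbase-site.xml).
        Configuration conf = HBaseConfiguration.create();

        // Lower the "needs balance" threshold named in the log message above.
        // With the default of 1.0 the balancer skips tables whose weighted
        // average imbalance is <= 1.0, which is what happened for table28.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f);

        // Alternatively, raise the relative weight of a specific cost function.
        // This key is assumed to correspond to RegionCountSkewCostFunction
        // (multiplier 500.0 in this run); it is not read from this log.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000.0f);

        // Print the effective values for inspection.
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
        System.out.println(conf.get("hbase.master.balancer.stochastic.regionCountCost"));
      }
    }

In a real deployment these properties would more likely be set in hbase-site.xml on the HMaster rather than in code; the sketch only illustrates the property names and value types implied by the log message.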
2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,432 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,433 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,433 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,434 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,434 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,435 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,436 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,436 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,437 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,438 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,438 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,439 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,439 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table29) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,439 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table20 2024-11-07T18:27:16,439 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,440 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,440 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
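The StochasticLoadBalancer(421) message above names the two knobs for more aggressive balancing: lowering hbase.master.balancer.stochastic.minCostNeedBalance (reported here as threshold(1.0)) or raising the multiplier of a specific cost function (e.g. RegionCountSkewCostFunction, shown with multiplier=500.0). A minimal sketch of how a test configuration might turn those knobs, assuming a plain org.apache.hadoop.conf.Configuration is handed to the balancer; the 0.05 and 1000 values are illustrative only and not taken from this run, and the regionCountCost key should be checked against your HBase version:

import org.apache.hadoop.conf.Configuration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = new Configuration();
    // Lower the "skip balancing" threshold the log reports as threshold(1.0),
    // so smaller weighted-average imbalances still trigger a balance plan.
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Alternatively (or additionally) weight region-count skew more heavily
    // relative to the other cost functions listed in functionCost= above.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    return conf;
  }
}

Either change makes the balancer more willing to emit moves for clusters like the one logged here, where every cost function currently reports imbalance=0.0 and balancing is skipped.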
2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,441 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,442 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,442 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,443 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,444 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,444 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,445 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,445 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,446 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,447 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,448 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0
2024-11-07T18:27:16,448 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1
2024-11-07T18:27:16,448 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness.
2024-11-07T18:27:16,448 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table20) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s).
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,449 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table21 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,449 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,449 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,450 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,451 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,452 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,452 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,453 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,453 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,454 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,454 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,455 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,456 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,457 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,457 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table21) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
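[editor's note] The INFO message above reports the balancer skipping table21 because the weighted average imbalance (0.0) is at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0), and the functionCost line that follows lists each cost function's multiplier and imbalance. As a minimal sketch of that decision, assuming it reduces to a multiplier-weighted average of the per-function imbalances compared against the threshold: the class and method names below are hypothetical illustration, not the actual HBase implementation or API; only the config key and the multiplier/imbalance values are taken from the log itself.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class MinCostNeedBalanceCheck {

    /** Multiplier-weighted average of per-cost-function imbalances (sketch, not HBase code). */
    static double weightedAverageImbalance(Map<String, double[]> costs) {
        double weightedSum = 0.0;
        double weightTotal = 0.0;
        for (double[] multiplierAndImbalance : costs.values()) {
            weightedSum += multiplierAndImbalance[0] * multiplierAndImbalance[1];
            weightTotal += multiplierAndImbalance[0];
        }
        return weightTotal == 0.0 ? 0.0 : weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        // {multiplier, imbalance} pairs copied from the functionCost line in this log.
        Map<String, double[]> costs = new LinkedHashMap<>();
        costs.put("RegionCountSkewCostFunction", new double[] {500.0, 0.0});
        costs.put("MoveCostFunction",            new double[] {7.0,   0.0});
        costs.put("RackLocalityCostFunction",    new double[] {15.0,  0.0});
        costs.put("TableSkewCostFunction",       new double[] {35.0,  0.0});
        costs.put("ReadRequestCostFunction",     new double[] {5.0,   0.0});
        costs.put("WriteRequestCostFunction",    new double[] {5.0,   0.0});
        costs.put("MemStoreSizeCostFunction",    new double[] {5.0,   0.0});
        costs.put("StoreFileCostFunction",       new double[] {5.0,   0.0});

        // Threshold named in the log: hbase.master.balancer.stochastic.minCostNeedBalance
        double minCostNeedBalance = 1.0;
        double imbalance = weightedAverageImbalance(costs);

        if (imbalance <= minCostNeedBalance) {
            System.out.printf("skipping load balancing: imbalance=%.1f <= threshold(%.1f)%n",
                imbalance, minCostNeedBalance);
        } else {
            System.out.println("would generate a balance plan");
        }
    }
}
```

As the log message itself suggests, lowering hbase.master.balancer.stochastic.minCostNeedBalance below the computed imbalance, or raising the multiplier of a specific cost function, would make the check fail and let the balancer generate a plan instead of skipping.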
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,457 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table22 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,458 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,460 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,461 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,461 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,462 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,462 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,463 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,463 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,464 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,465 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,466 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,466 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,466 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table22) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
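The balancer message above names two tuning knobs: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (1.0 in this run) and the per-cost-function multipliers. As a minimal sketch only, assuming the property keys printed in this log (the 0.05 value, the 1000 weight, and the class name below are illustrative assumptions, not taken from this test run), they could be set programmatically like this in Java:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower the threshold below which the balancer considers the cluster balanced.
        // The log above shows it at 1.0; 0.05 here is only an illustrative assumption.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
        // Alternatively, raise the relative weight of one cost function, e.g. region
        // count skew (the log shows its multiplier at 500.0); 1000 is illustrative.
        conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
        System.out.println(conf.get("hbase.master.balancer.stochastic.minCostNeedBalance"));
      }
    }

In an actual deployment these keys would normally be set in hbase-site.xml on the master (and picked up on balancer reload) rather than in code; the snippet only illustrates which properties the log message is referring to.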
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,467 INFO [Time-limited test {}] balancer.BaseLoadBalancer(770): Start Generate Balance plan for table: table23 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1760873703=136, srv487641141=295, srv979813971=386, srv1467988909=79, srv209990541=225, srv1661127998=117, srv2064873542=219, srv1724783155=128, srv1975322139=201, srv985856124=389, srv1559343395=97, srv46485979=289, srv1563762552=100, srv269305243=255, srv1593098168=107, srv2055920099=213, srv52023040=302, srv649086372=331, srv1924022047=181, srv1123380131=22, srv587139036=316, srv616761609=321, srv476424640=291, srv179080286=146, srv1392432534=56, srv1765835556=138, srv1158262902=28, srv2054096472=211, srv663169963=335, srv1026082539=5, srv1947317144=191, srv2092019603=223, srv929290885=379, srv2055291076=212, srv1844429668=160, srv499559406=297, srv1635730604=114, srv1535594239=91, srv1631784024=112, srv223496424=238, srv2139924809=235, srv2011167810=207, srv783576222=353, srv1680309007=120, srv28053661=259, srv2064735505=218, srv1517745371=88, srv905018098=375, srv237636290=247, srv1041313102=12, srv740086619=348, srv1935830738=187, srv382318245=274, srv647381916=329, srv1767329857=139, srv1271340433=38, srv979914337=387, srv1077346284=17, srv579732980=315, srv48402974=294, srv479770161=293, srv548660583=310, srv559242821=312, srv2121724882=232, srv427673589=282, srv887666197=373, srv1282179868=40, srv1093377056=20, srv1944322837=190, srv2118880960=231, srv1761143647=137, srv180139473=151, srv322565774=264, srv1869846666=167, srv1931882733=185, srv1529765575=90, srv84055186=365, srv1483946433=83, srv182027797=154, srv114646695=26, srv1511456868=86, srv1753888598=135, srv1981336619=202, srv27878370=258, srv662006763=334, srv321432763=263, srv346755167=270, srv398902796=278, srv1083892755=19, srv1995809887=204, srv2057285325=214, srv576808786=314, srv1396371163=57, srv1437069173=68, srv975243707=385, srv984796188=388, srv547095670=309, srv1348392668=50, srv860035491=368, srv599103826=318, srv1014853623=4, srv2065113752=220, srv226997986=241, srv1545970891=95, srv502681618=299, srv55679571=311, srv1239617567=35, srv1002061997=0, srv234725207=244, srv2103635956=226, srv82075052=360, srv258499984=251, srv2059777612=215, srv648334137=330, srv915495241=376, srv1740124192=131, srv822252004=361, srv267637589=254, srv1401824266=59, srv1515139587=87, srv536744696=308, srv1879512001=169, srv14935724=84, srv178291138=145, srv1502331421=85, srv1821713130=155, srv2003778343=206, srv1907805991=177, srv52422143=305, srv745476995=349, srv1057375783=14, srv1709906950=124, srv2065482178=221, srv764449770=350, srv1429162280=64, srv353245389=271, srv634555266=325, srv9463518=380, srv1462630765=77, srv1822293238=156, srv154221726=93, srv687893040=339, srv1468800392=80, srv477010475=292, srv192876862=184, srv1663444014=118, srv509658814=300, srv240624603=249, 
srv158279087=102, srv2108071957=229, srv1586084349=103, srv1933600438=186, srv2011416132=208, srv1963460171=196, srv575954021=313, srv1389198525=55, srv1030066584=7, srv1780362658=144, srv1920450646=180, srv1845305826=161, srv1643424013=115, srv1430226101=65, srv1409787229=61, srv1863249463=163, srv1747635254=133, srv708858424=341, srv1881095777=170, srv1951645930=193, srv1562826039=99, srv1836369165=157, srv1852940136=162, srv1770468221=141, srv660981288=333, srv972638489=383, srv709170532=342, srv1622463009=111, srv1335747203=47, srv235268590=245, srv921824141=377, srv990023924=390, srv716170866=344, srv1443619594=70, srv405305233=279, srv1466097260=78, srv334170973=268, srv1580640866=101, srv616678438=320, srv1519301687=89, srv1678782646=119, srv1841621089=159, srv1889005094=174, srv226232867=240, srv215922093=237, srv2077590015=222, srv73808585=347, srv924134719=378, srv1210560147=34, srv995187503=391, srv1326088402=45, srv1258737796=36, srv1654198814=116, srv597958105=317, srv1041104783=11, srv47308468=290, srv1375534047=54, srv685729971=337, srv239265893=248, srv658473027=332, srv1863812338=164, srv2030327589=210, srv1865727051=165, srv1966014263=198, srv1918933599=179, srv277439410=257, srv1561336983=98, srv174304497=132, srv342669767=269, srv1319746791=44, srv518920838=301, srv211734275=230, srv328361724=265, srv170771633=123, srv1209256944=33, srv8074845=356, srv126144165=37, srv188309558=171, srv422790353=281, srv1690391146=121, srv1063866963=15, srv1939466509=188, srv1132412686=24, srv1470049986=81, srv332364980=267, srv722896847=345, srv1911970104=178, srv46270497=287, srv1958224648=195, srv215306234=236, srv1082706875=18, srv386960112=275, srv363934942=272, srv522208031=304, srv1964568792=197, srv1370121086=52, srv621384629=324, srv1450214316=71, srv2018394113=209, srv90278947=374, srv1591440771=106, srv302642116=262, srv2105342442=228, srv1774440052=142, srv851818621=366, srv1612885301=110, srv1002851188=1, srv1175532601=29, srv788941553=354, srv1070789205=16, srv1435661802=67, srv1340659479=48, srv464628224=288, srv302079059=261, srv364902835=273, srv1296247533=42, srv1719267642=127, srv1801203704=150, srv813908975=357, srv1751794801=134, srv421765681=280, srv198790226=203, srv688395360=340, srv1633574592=113, srv793778653=355, srv97388630=384, srv970365640=382, srv1475264596=82, srv636656835=326, srv1792616835=147, srv827780146=363, srv1974229149=200, srv94907264=381, srv1029182373=6, srv1804766003=152, srv1397880908=58, srv2135612857=234, srv2064634000=217, srv1926843705=182, srv2059895409=216, srv646549843=328, srv643381159=327, srv1043352574=13, srv766434902=351, srv1372763136=53, srv1796301954=149, srv1208361250=32, srv194961136=192, srv1940719515=189, srv1588295920=104, srv838795326=364, srv1450607629=72, srv200156181=205, srv1461052119=76, srv1111818990=21, srv494827872=296, srv1038080028=9, srv398153156=277, srv2133724122=233, srv440997914=283, srv619858955=322, srv711495846=343, srv2105270694=227, srv1602422844=108, srv262866598=253, srv1539605103=92, srv1889562477=175, srv275239050=256, srv687603632=338, srv619926035=323, srv231223441=242, srv1736002592=129, srv1871081574=168, srv1809489670=153, srv2093596131=224, srv1434826896=66, srv186715352=166, srv1403413041=60, srv1006807174=2, srv1928272161=183, srv329804278=266, srv1361295912=51, srv251425674=250, srv875223377=371, srv1972132400=199, srv1795550894=148, srv1712837494=125, srv1440357665=69, srv1769317929=140, srv528923351=306, srv1455530122=74, srv857497921=367, srv1894505235=176, srv1544254758=94, 
srv1457601731=75, srv1740080282=130, srv615063069=319, srv454439491=286, srv1884639409=173, srv824875897=362, srv1345218827=49, srv1295292579=41, srv225244796=239, srv86857835=370, srv1010535505=3, srv297573258=260, srv1952717107=194, srv520254835=303, srv864587510=369, srv1839852385=158, srv1038286195=10, srv231806720=243, srv817271635=358, srv731336893=346, srv451297120=285, srv1034861358=8, srv1177362198=30, srv677326828=336, srv1281418635=39, srv775917271=352, srv1419491722=63, srv1884158310=172, srv388407222=276, srv444024667=284, srv113364701=25, srv1778469135=143, srv1315568363=43, srv1327077420=46, srv1716606625=126, srv1190679768=31, srv1418571541=62, srv1558740573=96, srv236904021=246, srv531654159=307, srv50062706=298, srv1589581786=105, srv262550331=252, srv884454547=372, srv1606557611=109, srv819401725=359, srv1454049150=73, srv1694410448=122, srv1126832579=23, srv1147721027=27, srv998113375=392} racks are {rack=0} 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:16,467 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 2024-11-07T18:27:16,467 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on 
host 85 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 is on host 92 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 
2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is on host 123 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:16,468 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is 
on host 147 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 154 is on host 154 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 178 is on host 178 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:16,469 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:16,470 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 
2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is on host 277 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is 
on host 301 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 308 is on host 308 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:16,470 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 332 is on host 332 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:16,471 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:16,471 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:16,472 
INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:16,472 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on 
rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 
2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:16,473 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 
2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:16,474 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 
2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:16,475 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=393, number of racks=1 2024-11-07T18:27:16,475 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,475 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Table specific (table23) - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,476 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=-1.0 2024-11-07T18:27:16,476 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-07T18:27:16,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,476 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
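The two entries just above spell out the balancer's knobs for this run: balancing is skipped while the weighted average imbalance stays at or below hbase.master.balancer.stochastic.minCostNeedBalance (1.0 here), and the loaded config reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000 and isByTable=false. A minimal sketch of tuning these from client code follows; only the minCostNeedBalance key is quoted verbatim in the log, the other property names are assumptions chosen to match the echoed values.

// Sketch, not the test's code: tighten the StochasticLoadBalancer so mild skews
// (like the 0.2888... run further down) produce a plan instead of being skipped.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Lower the skip threshold from the 1.0 seen in the log (key quoted from the log text).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Values copied from the "Loaded config" entry; these key names are assumptions.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30000);
    conf.setBoolean("hbase.master.loadbalance.bytable", false);
    return conf;
  }
}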
2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2106014212=0, srv72159367=1} racks are {rack=0} 2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,477 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,478 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,478 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-07T18:27:16,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,478 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=3200 2024-11-07T18:27:16,521 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 43 ms to try 3200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,555 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1700167162=0, srv822231749=1} racks are {rack=0} 2024-11-07T18:27:16,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,557 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-07T18:27:16,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,557 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
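The numbers in the run above are internally consistent with a weighted average over the "needed" cost functions: with multipliers 500, 7, 15, 35, 5, 5, 5, 5 (sum 577.0, as the "Loaded config" entry states), a RegionCountSkew imbalance of 1.0 gives 500/577 ≈ 0.8665511265164645, and after the single move only MoveCostFunction (multiplier 7.0, imbalance 0.5) contributes, giving 3.5/577 ≈ 0.006065857885615251. The check below is a reading of the reported numbers, not the balancer's actual code.

// Arithmetic check only; mirrors the values printed above, not HBase's implementation.
public class WeightedImbalanceCheck {
  static double weighted(double[] multipliers, double[] imbalances) {
    double num = 0.0, den = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      num += multipliers[i] * imbalances[i];
      den += multipliers[i];
    }
    return num / den;
  }

  public static void main(String[] args) {
    // Multipliers in the order the log prints the active cost functions (sum = 577.0).
    double[] m      = {500.0, 7.0, 15.0, 35.0, 5.0, 5.0, 5.0, 5.0};
    double[] before = {1.0,   0.0, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0}; // RegionCountSkew imbalance=1.0
    double[] after  = {0.0,   0.5, 0.0,  0.0,  0.0, 0.0, 0.0, 0.0}; // MoveCost imbalance=0.5 after 1 move
    System.out.println(weighted(m, before)); // ~0.8665511265164645
    System.out.println(weighted(m, after));  // ~0.006065857885615251
  }
}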
2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1564550482=0, srv437574263=1} racks are {rack=0} 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,558 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=2, number of racks=1 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,558 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,558 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1844244008=1, srv1106775704=0} racks are {rack=0} 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,559 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=2, number of racks=1 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,559 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,559 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1929352199=0, srv914641189=1} racks are {rack=0} 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=2, number of racks=1 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.2888503755054882 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
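Each "skipping load balancing" entry above is the same gate: the weighted average imbalance is compared against the minCostNeedBalance threshold (1.0 in this run), and only a larger value, or a separate trigger such as an idle server, lets a plan be computed. A one-line sketch of that comparison, assuming exactly the semantics described in the log text:

// Sketch of the skip decision described in the log messages; not the actual HBase method.
public class NeedsBalanceSketch {
  static boolean needsBalance(double weightedAverageImbalance, double minCostNeedBalance) {
    return weightedAverageImbalance > minCostNeedBalance;
  }

  public static void main(String[] args) {
    System.out.println(needsBalance(0.2888503755054882, 1.0)); // false -> "skipping load balancing"
    System.out.println(needsBalance(0.8665511265164645, 1.0)); // also false on its own; the earlier
                                                               // run proceeded because of idle servers
  }
}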
2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv475201642=0, srv578806171=1} racks are {rack=0} 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1560209488=0, srv1900557055=1} racks are {rack=0} 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,560 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,560 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=2, number of racks=1 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv979359183=1, srv1045965797=0} racks are {rack=0} 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,561 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=2, number of racks=1 2024-11-07T18:27:16,561 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,561 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.6932409012131716 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.8); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,568 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:16,571 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv927528088=1, srv1439177213=0} racks are {rack=0} 2024-11-07T18:27:16,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1432, number of hosts=2, number of racks=1 2024-11-07T18:27:16,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,595 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.40878413881917497 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4717368961973279); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,595 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,595 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv58666103=1, srv1727444133=0} racks are {rack=0} 2024-11-07T18:27:16,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,596 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,596 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=53, number of hosts=2, number of racks=1 2024-11-07T18:27:16,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,596 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.034662045060658585 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.04000000000000001); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,596 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv967942158=2, srv808087973=1, srv1777452438=0} racks are {rack=0} 2024-11-07T18:27:16,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,597 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,597 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-07T18:27:16,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,597 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608543, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-07T18:27:16,613 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. 
Computation took 16 ms to try 7200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.5003035261608543 to a new imbalance of 0.004043905257076833. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2063416273=1, srv911742480=2, srv1532762608=0} racks are {rack=0} 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.25015176308042714 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.2886751345948129); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
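The step budgets line up with the loaded stepsPerRegion=800: computedMaxSteps comes out as 3200, 7200, 9600, 12800, 19200 and 96000 across these runs, i.e. 800 times a small integer, which is consistent with a budget of stepsPerRegion multiplied by the number of regions in the cluster, capped by maxSteps=1000000. That relationship is an inference from the printed values, not something the log states; a sketch under that assumption:

// Assumed relationship, inferred from the computedMaxSteps values in this log:
// budget = min(maxSteps, stepsPerRegion * regionCount). Not quoted from HBase source.
public class StepBudgetSketch {
  static long computedMaxSteps(long maxSteps, long stepsPerRegion, long regionCount) {
    return Math.min(maxSteps, stepsPerRegion * regionCount);
  }

  public static void main(String[] args) {
    // With stepsPerRegion=800 and maxSteps=1000000 as loaded here,
    // a 9-region cluster would get the 7200-step budget seen above.
    System.out.println(computedMaxSteps(1000000L, 800L, 9L)); // 7200
  }
}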
2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv253767299=2, srv2084582228=1, srv1816186650=0} racks are {rack=0} 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,614 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,614 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,615 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=2, number of hosts=3, number of racks=1 2024-11-07T18:27:16,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,615 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2888503755054882, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.33333333333333337); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-07T18:27:16,635 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 20 ms to try 9600 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.2888503755054882 to a new imbalance of 0.0030329289428076256. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.25); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv477256633=2, srv1918857556=1, srv1617558388=0} racks are {rack=0} 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,635 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,635 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=3, number of racks=1 2024-11-07T18:27:16,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,636 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=7200 2024-11-07T18:27:16,647 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 12 ms to try 7200 different iterations. 
Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,647 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2113886218=1, srv1710661460=0, srv53595789=2} racks are {rack=0} 2024-11-07T18:27:16,647 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,648 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,648 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=3, number of racks=1 2024-11-07T18:27:16,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,648 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=9600 2024-11-07T18:27:16,663 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 15 ms to try 9600 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,663 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
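"Running balancer because cluster has idle server(s)" is the trigger that overrides the threshold in several of these runs: even with the weighted imbalance below 1.0, a plan is computed when some region server is carrying no load. A sketch of that check, assuming "idle" means a server holding zero regions while others hold at least one (the log names the trigger but does not define the term):

// Sketch only: the definition of "idle server" here is an assumption, not quoted from HBase.
public class IdleServerSketch {
  static boolean hasIdleServer(int[] regionsPerServer) {
    boolean anyIdle = false, anyLoaded = false;
    for (int count : regionsPerServer) {
      if (count == 0) anyIdle = true;
      else anyLoaded = true;
    }
    return anyIdle && anyLoaded;
  }

  public static void main(String[] args) {
    // e.g. three servers where one holds everything, as in the runs above that then moved regions.
    System.out.println(hasIdleServer(new int[] {4, 0, 0})); // true -> balancer runs despite the threshold
  }
}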
2024-11-07T18:27:16,663 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv622819086=1, srv796917863=2, srv1928910018=0} racks are {rack=0} 2024-11-07T18:27:16,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,664 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,664 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=20, number of hosts=3, number of racks=1 2024-11-07T18:27:16,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,664 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42216593343109815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4871794871794873); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=96000 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 178 ms to try 96000 different iterations. Found a solution that moves 13 regions; Going from a computed imbalance of 0.42216593343109815 to a new imbalance of 0.003942807625649913. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.325); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv988081124=3, srv1064154293=0, srv767957387=2, srv1204679466=1} racks are {rack=0} 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,842 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=3, number of hosts=4, number of racks=1 2024-11-07T18:27:16,842 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,843 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.25526148491585815, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.29457175359290033); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-07T18:27:16,869 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 27 ms to try 19200 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.25526148491585815 to a new imbalance of 0.0020219526285384167. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.16666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
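Across these plans the reported MoveCostFunction imbalance tracks how much of the cluster a plan would move: a single moved region shows up as 0.5, 0.3333…, 0.25 or 0.16666… in progressively larger clusters, and 13 moved regions as 0.325. Read as moved regions divided by total regions this is self-consistent (0.16666… would correspond to 1 of 6 regions, 0.325 to 13 of 40), but those region totals are inferred rather than printed; a sketch under that reading:

// Reading of the log, not the HBase formula: MoveCostFunction imbalance appears to equal
// movedRegions / totalRegions for these runs. The total region counts below are inferred.
public class MoveCostReading {
  static double moveImbalance(int movedRegions, int totalRegions) {
    return (double) movedRegions / (double) totalRegions;
  }

  public static void main(String[] args) {
    System.out.println(moveImbalance(1, 6));   // 0.1666... as in the plan above (assumed 6 regions)
    System.out.println(moveImbalance(13, 40)); // 0.325 as in the 13-move plan (assumed 40 regions)
  }
}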
2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1118799123=0, srv1273138925=1, srv552251908=2, srv836895310=3} racks are {rack=0} 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,869 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,869 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=4, number of racks=1 2024-11-07T18:27:16,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,870 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=12800 2024-11-07T18:27:16,887 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 18 ms to try 12800 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.009098786828422877. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.75); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1826303412=1, srv2104648648=2, srv1321751141=0, srv889739201=3} racks are {rack=0} 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,888 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=4, number of racks=1 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,888 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=16000 2024-11-07T18:27:16,911 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 23 ms to try 16000 different iterations. Found a solution that moves 3 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.007279029462738302. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,911 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:16,911 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1518858709=1, srv413981882=2, srv1470111516=0, srv641734819=3} racks are {rack=0} 2024-11-07T18:27:16,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,912 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,912 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.5003035261608542, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896257); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=38400 2024-11-07T18:27:16,970 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 58 ms to try 38400 different iterations. Found a solution that moves 6 regions; Going from a computed imbalance of 0.5003035261608542 to a new imbalance of 0.006065857885615251. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,970 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:16,970 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1330007863=1, srv1557361628=2, srv1169586131=0, srv2038631216=3} racks are {rack=0} 2024-11-07T18:27:16,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:16,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:16,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:16,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:16,971 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:16,971 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6127441778046339, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7071067811865475); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=25600 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 37 ms to try 25600 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6127441778046339 to a new imbalance of 0.006065857885615251. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv192517087=2, srv1864243906=0, srv893067990=3, srv1871987862=1} racks are {rack=0} 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,008 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-07T18:27:17,008 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,009 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.6852343510309111, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.7907604410896715); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-07T18:27:17,042 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.6852343510309111 to a new imbalance of 0.006932409012131715. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.5714285714285714); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1016246172=0, srv1193474421=1, srv966644167=3, srv1386745550=2} racks are {rack=0} 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,042 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,042 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,043 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=4, number of racks=1 2024-11-07T18:27:17,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,043 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=19200 2024-11-07T18:27:17,072 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 29 ms to try 19200 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008087810514153667. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.6666666666666666); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,072 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:17,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1332966528=1, srv527597570=3, srv1108316212=0, srv1968659475=2} racks are {rack=0} 2024-11-07T18:27:17,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,072 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.0962834585018294 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.11111111111111113); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1028244093=0, srv297838040=3, srv1219397566=1, srv1307964266=2} racks are {rack=0} 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=4, number of racks=1 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.17331022530329285 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.19999999999999996); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
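The two "skipping load balancing" records above are the complementary case of the same weighting: with only RegionCountSkewCostFunction contributing, 500.0 x 0.11111111111111113 / 577.0 = 0.0962834585... and 500.0 x 0.19999999999999996 / 577.0 = 0.1733102253..., both below the threshold of 1.0 that the message attributes to hbase.master.balancer.stochastic.minCostNeedBalance, so no balance plan is generated for those two clusters.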
2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv2068270570=1, srv494016661=3, srv1525327680=0, srv2077836316=2} racks are {rack=0} 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,073 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=7, number of hosts=4, number of racks=1 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,073 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=22400 2024-11-07T18:27:17,107 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 34 ms to try 22400 different iterations. Found a solution that moves 5 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.008665511265164644. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.7142857142857143); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1531958841=1, srv1816084213=3, srv150534638=0, srv1794629849=2, srv293333593=4} racks are {rack=0} 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=5, number of racks=1 2024-11-07T18:27:17,108 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:17,108 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.22705408170595567 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.26202041028867284); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,117 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:17,119 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv92389915=5, srv1946023449=2, srv1660845083=1, srv1979921573=3, srv820327748=4, srv1602076855=0} racks are {rack=0} 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:17,143 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:17,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:17,143 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:17,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:17,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:17,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:17,144 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:17,144 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-07T18:27:17,147 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:17,147 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:14448000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-07T18:27:17,148 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.38476461962415054, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4440183710462697); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-07T18:27:29,781 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 12636 ms to try 1000000 different iterations. Found a solution that moves 1016 regions; Going from a computed imbalance of 0.38476461962415054 to a new imbalance of 0.004094957881584781. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.33754152823920264); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:29,792 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:29,793 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1967706612=4, srv1615771011=2, srv729596324=5, srv1277034603=1, srv1911947157=3, srv1209863879=0} racks are {rack=0} 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:29,819 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:29,819 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=1500, number of hosts=6, number of racks=1 2024-11-07T18:27:29,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:29,823 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:16800000 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. 
Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-07T18:27:29,823 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.2979275647131677, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.3438084096789955); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-07T18:27:43,629 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 13809 ms to try 1000000 different iterations. Found a solution that moves 923 regions; Going from a computed imbalance of 0.2979275647131677 to a new imbalance of 0.003199306759098787. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.26371428571428573); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,631 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
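Both large runs above (1500 tables on 6 hosts) hit the cap described in the WARN records: the step budget the balancer would like to use (calculatedMaxSteps 14,448,000 and 16,800,000) exceeds hbase.master.balancer.stochastic.maxSteps (1,000,000), so only 1,000,000 random-walk iterations are attempted, which already takes 12,636 ms and 13,809 ms here (roughly 70,000-80,000 iterations per second). As the message says, setting hbase.master.balancer.stochastic.runMaxSteps to true removes that cap without a service restart, at a proportionally higher computation time; judging from the smaller runs in this log, the computed budget appears to grow with the number of regions and servers, so this is mainly a concern for large clusters.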
2024-11-07T18:27:43,631 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv914416412=12, srv969325033=14, srv727152572=11, srv338274617=8, srv50854766=9, srv1324041156=2, srv1730299570=4, srv1853172804=5, srv2066274559=7, srv109431226=0, srv2060312817=6, srv574854663=10, srv928687959=13, srv1161620175=1, srv1409653985=3} racks are {rack=0} 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:43,632 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:43,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:43,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:43,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:43,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:43,632 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=15, number of hosts=15, number of racks=1 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.12507588154021357 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.14433756729740646); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1434605725=1, srv1306912300=0, srv611542770=7, srv494563363=6, srv1544641689=2, srv219871728=3, srv967559668=9, srv440891520=5, srv672368901=8, srv236028946=4} racks are {rack=0} 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:43,633 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 
0 2024-11-07T18:27:43,633 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:43,634 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,634 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 117 ms to try 80000 different iterations. Found a solution that moves 9 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010918544194107453. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1557841072=3, srv862247508=8, srv1120694067=1, srv328554208=6, srv1568646694=4, srv10470503=0, srv1396296315=2, srv357171307=7, srv992513701=9, srv1900356734=5} racks are {rack=0} 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:43,751 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:43,751 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=6, number of hosts=10, number of racks=1 2024-11-07T18:27:43,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:43,752 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.055531997651093117 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.06408392528936147); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,752 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:43,752 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv750449816=7, srv906125369=9, srv1132108174=0, srv815297191=8, srv1650071966=4, srv1690738917=5, srv1418275876=2, srv449063353=6, srv1253787729=1, srv1634641074=3} racks are {rack=0} 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:43,753 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=54, number of hosts=10, number of racks=1 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle 
server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:43,753 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164644, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999999); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=432000 2024-11-07T18:27:44,591 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 838 ms to try 432000 different iterations. Found a solution that moves 48 regions; Going from a computed imbalance of 0.8665511265164644 to a new imbalance of 0.01078374735220489. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8888888888888888); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:44,591 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:44,591 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv140220862=1, srv919487285=9, srv208226193=4, srv580021082=6, srv505977969=5, srv1988433759=3, srv769314452=8, srv631320301=7, srv1516640236=2, srv1059764723=0} racks are {rack=0} 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:44,592 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=55, number of hosts=10, number of racks=1 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:44,592 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=440000 2024-11-07T18:27:45,457 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 865 ms to try 440000 different iterations. Found a solution that moves 49 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010808255868914448. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8909090909090909); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv847995617=9, srv1875167946=6, srv1948520508=8, srv1777661204=4, srv1603700402=2, srv161035617=3, srv1916073152=7, srv103248464=0, srv1177013423=1, srv1835202963=5} racks are {rack=0} 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:45,458 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:45,458 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:45,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:45,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:45,459 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:45,459 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=10, number of racks=1 2024-11-07T18:27:45,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:45,459 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=448000 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 880 ms to try 448000 different iterations. Found a solution that moves 50 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.010831889081455806. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.8928571428571429); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1150071478=1, srv1171750741=2, srv2141847967=7, srv1479321203=3, srv1953590397=6, srv1496489818=4, srv752312933=9, srv597350046=8, srv1843440199=5, srv1108957157=0} racks are {rack=0} 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,339 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,339 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,340 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=16, number of hosts=10, number of racks=1 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,340 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164645, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=1.0, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=128000 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 207 ms to try 128000 different iterations. Found a solution that moves 14 regions; Going from a computed imbalance of 0.8665511265164645 to a new imbalance of 0.01061525129982669. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.875); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
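The computedMaxSteps budgets seen so far (80000, 432000, 440000, 448000, 128000) grow with the size of each mock cluster, which is consistent with the balancer budgeting a fixed number of candidate steps per region via hbase.master.balancer.stochastic.stepsPerRegion, capped by hbase.master.balancer.stochastic.maxSteps. The sketch below is under assumed defaults (800 steps per region, 1,000,000 max steps) and uses an illustrative region count rather than one read from this log.

```java
public class MaxStepsBudget {
  public static void main(String[] args) {
    // Assumed defaults; the test above may configure different values.
    long stepsPerRegion = 800L;   // hbase.master.balancer.stochastic.stepsPerRegion
    long maxSteps = 1_000_000L;   // hbase.master.balancer.stochastic.maxSteps
    long numRegions = 560L;       // illustrative region count, not taken from the log
    // One common form of the budget: steps proportional to region count, capped.
    long computedMaxSteps = Math.min(maxSteps, numRegions * stepsPerRegion);
    System.out.println("computedMaxSteps=" + computedMaxSteps);
    // Prints 448000 under these assumptions, i.e. the budget the 56-table run above
    // would get if that cluster indeed held 560 regions.
  }
}
```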
2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv172453150=1, srv1766228198=2, srv976887542=9, srv154914114=0, srv95973502=8, srv306984971=4, srv760733145=7, srv53235282=5, srv207683285=3, srv603285778=6} racks are {rack=0} 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=8, number of hosts=10, number of racks=1 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,547 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.30649131741006164 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.35369098029121115); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,547 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv378866185=4, srv680767040=6, srv1026583671=0, srv1881000653=3, srv730481655=8, srv1098840991=1, srv1251718856=2, srv53785207=5, srv686628615=7, srv919661603=9} racks are {rack=0} 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=9, number of hosts=10, number of racks=1 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 
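The skip messages above name the two knobs directly: the hbase.master.balancer.stochastic.minCostNeedBalance threshold (pinned at 1.0 in these runs) and the per-function multipliers such as the 500.0 on RegionCountSkewCostFunction. Below is a minimal sketch of adjusting them programmatically; the multiplier key hbase.master.balancer.stochastic.regionCountCost is taken from HBase documentation rather than from this log, and on a real cluster the same keys would normally go into hbase-site.xml.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower the threshold so smaller weighted imbalances still trigger a run
    // (the runs above pin it at 1.0, which is why 0.30649... was skipped).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.05f);
    // Or give RegionCountSkewCostFunction even more weight than the 500.0 above.
    // Key name assumed from the HBase reference guide, not printed in this log.
    conf.setFloat("hbase.master.balancer.stochastic.regionCountCost", 1000f);
    System.out.println(conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", -1f));
  }
}
```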
2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.34662045060658575 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.39999999999999997); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv183581085=4, srv1339767116=2, srv760054024=7, srv1329371195=1, srv1613171488=3, srv794071139=8, srv1144750878=0, srv834559737=9, srv217339404=6, srv1849161035=5} racks are {rack=0} 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 
2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=10, number of hosts=10, number of racks=1 2024-11-07T18:27:46,548 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,548 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.3851338340073175 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.4444444444444444); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,549 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,549 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1099142019=1, srv1451783965=2, srv107874776=0, srv1612164952=4, srv1519941428=3, srv1904029086=5, srv2072393924=8, srv45964086=9, srv2023436565=6, srv2033440709=7} racks are {rack=0} 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=123, number of hosts=10, number of racks=1 2024-11-07T18:27:46,550 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,550 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8002334382626535 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.923469387755102); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,551 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
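Two different triggers are visible in these entries: runs that report an idle server go straight to plan computation ("Running balancer because cluster has idle server(s)"), while the remaining runs only proceed when the weighted average imbalance exceeds the threshold, which is why 0.3851338340073175 is skipped against threshold(1.0). A small sketch of that decision as read from these entries (hypothetical helper, not HBase source):

```java
public class BalanceDecisionSketch {
  // Hypothetical reading of the log above: an idle, region-less server forces a
  // balancing run; otherwise the weighted average imbalance must exceed the
  // minCostNeedBalance threshold for a plan to be computed.
  static boolean needsBalance(boolean hasIdleServer, double weightedImbalance, double threshold) {
    return hasIdleServer || weightedImbalance > threshold;
  }

  public static void main(String[] args) {
    double threshold = 1.0; // "threshold(1.0)" printed in the skip messages above
    System.out.println(needsBalance(true, 0.8665511265164645, threshold));  // true  -> plan is computed
    System.out.println(needsBalance(false, 0.3851338340073175, threshold)); // false -> "skipping load balancing"
  }
}
```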
2024-11-07T18:27:46,551 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv296576452=4, srv2112399558=2, srv679831139=6, srv910119115=9, srv1461277142=0, srv50579311=5, srv831113270=7, srv2125590794=3, srv1959124506=1, srv866613571=8} racks are {rack=0} 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=155, number of hosts=10, number of racks=1 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,552 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.8131812243798632 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9384111329343621); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,552 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1794991740=5, srv457672144=7, srv1559622004=2, srv1507497270=1, srv1700433117=3, srv1136221026=0, srv1782170181=4, srv437350947=6} racks are {rack=0} 2024-11-07T18:27:46,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,553 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,554 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.05755254949858986 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0664156421213727); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,554 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv945309235=7, srv103668905=0, srv1764153268=2, srv2117998869=4, srv2114193763=3, srv707007025=6, srv1651918633=1, srv337640312=5} racks are {rack=0} 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,555 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,555 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,556 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,556 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=14, number of hosts=8, number of racks=1 2024-11-07T18:27:46,556 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,556 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.06673965003400768 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.07701755613924488); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,557 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,557 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1539100216=3, srv1556592533=4, srv4481724=6, srv1064029757=0, srv1990342359=5, srv1094874851=1, srv521106444=7, srv1488068733=2} racks are {rack=0} 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,562 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,562 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=130, number of hosts=8, number of racks=1 2024-11-07T18:27:46,563 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,563 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.28093705674099306 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.32420136347910594); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,564 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,564 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv572579753=5, srv59557959=6, srv114898550=1, srv1184135430=2, srv178148029=4, srv113637302=0, srv938368882=7, srv133347020=3} racks are {rack=0} 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,574 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,574 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=140, number of hosts=8, number of racks=1 2024-11-07T18:27:46,575 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(253): Slop is less than zero, not checking for sloppiness. 2024-11-07T18:27:46,575 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.07533492111851356 <= threshold(1.0). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 1.0 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.08693649897076465); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv1188793745=0, srv1491851494=2, srv1412220623=1, srv928211147=4, srv829734481=3} racks are {rack=0} 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,576 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=5, number of hosts=5, number of racks=1 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). 
function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,576 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.21663778162911612, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.25); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=80000 2024-11-07T18:27:46,692 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 116 ms to try 80000 different iterations. Found a solution that moves 4 regions; Going from a computed imbalance of 0.21663778162911612 to a new imbalance of 0.0024263431542461008. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,703 DEBUG [Time-limited test {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
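The figures in the entry above are internally consistent if the "weighted average imbalance" is read as the multiplier-weighted mean of the per-function imbalances over the functions not marked "(not needed)". This is a reading of the logged numbers, not HBase's actual implementation; the sketch below simply reproduces the initial value 0.21663778162911612 (RegionCountSkew 0.25 at multiplier 500) and the post-plan value 0.0024263431542461008 (MoveCost 0.2 at multiplier 7) under that assumption.

```java
// Minimal sketch (assumed formula, not HBase source): weighted average
// imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i), taken
// over the cost functions reported as needed in the log.
public class WeightedImbalanceSketch {
  static double weightedAverage(double[] multipliers, double[] imbalances) {
    double weighted = 0.0, totalMultiplier = 0.0;
    for (int i = 0; i < multipliers.length; i++) {
      weighted += multipliers[i] * imbalances[i];
      totalMultiplier += multipliers[i];
    }
    return weighted / totalMultiplier;
  }

  public static void main(String[] args) {
    // Multipliers of the "needed" functions in the log: RegionCountSkew,
    // Move, RackLocality, TableSkew, ReadRequest, WriteRequest,
    // MemStoreSize, StoreFile. They sum to 577.
    double[] m = {500, 7, 15, 35, 5, 5, 5, 5};

    double[] before = {0.25, 0.0, 0, 0, 0, 0, 0, 0};
    double[] after  = {0.0,  0.2, 0, 0, 0, 0, 0, 0};

    System.out.println(weightedAverage(m, before)); // ~0.21663778162911612
    System.out.println(weightedAverage(m, after));  // ~0.0024263431542461008
  }
}
```

The same arithmetic matches the earlier skip decision: 500 x 0.08693649897076465 / 577 is roughly 0.0753349, the value compared against the 1.0 threshold.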
2024-11-07T18:27:46,707 DEBUG [Time-limited test {}] balancer.BalancerClusterState(202): Hosts are {srv445179937=281, srv1969127346=199, srv633688188=323, srv1630867918=108, srv498598128=299, srv1403282585=68, srv1953765036=194, srv1409219779=69, srv541421159=307, srv1675097657=124, srv902411836=375, srv1955952700=197, srv1300359847=48, srv861872185=364, srv1931594400=188, srv1490277574=81, srv1691838340=128, srv1365431461=62, srv1818766643=161, srv1891058744=178, srv2120568363=222, srv1949214571=193, srv1061972034=6, srv1257837076=38, srv926406071=380, srv1664601606=119, srv157904784=103, srv68643060=337, srv1548446221=96, srv1461088955=76, srv656793409=326, srv1656711281=115, srv483137900=294, srv1318921983=53, srv2119202107=221, srv821918291=356, srv876941389=369, srv494742437=298, srv1258068878=39, srv2099192813=219, srv805428582=353, srv524872942=303, srv1380201925=65, srv1256952899=37, srv1339723632=56, srv242645258=233, srv1724993205=137, srv1525372675=90, srv933461274=381, srv1840375356=166, srv1522915447=89, srv826481097=358, srv420161749=275, srv1026345640=1, srv512077063=302, srv1947019580=192, srv376190435=262, srv1736496716=139, srv153310219=92, srv727590178=340, srv1173604576=21, srv1109528268=13, srv1681413035=126, srv416212683=274, srv314375824=250, srv1519531672=87, srv1631352647=109, srv1739202734=140, srv1700624614=130, srv1787918416=153, srv662382971=329, srv770056094=346, srv356147374=255, srv1740120861=142, srv192680841=186, srv18120452=159, srv562751251=312, srv876153342=367, srv1695975559=129, srv2038041517=211, srv386865753=265, srv942719688=383, srv876323353=368, srv1095193325=10, srv1139715213=18, srv1351085813=59, srv457997811=288, srv1442382116=74, srv1852063900=171, srv1704643450=132, srv816707510=355, srv145011297=75, srv1410756149=70, srv195447295=196, srv248525253=235, srv1415162756=71, srv786089371=350, srv1188695409=23, srv1759248766=147, srv24363895=234, srv978899381=388, srv2147482761=227, srv256949278=237, srv1073266024=8, srv1674698743=123, srv767167172=345, srv987984107=391, srv1889939255=177, srv226155986=229, srv535289640=306, srv1196907815=26, srv1121118740=16, srv530045499=304, srv395232390=268, srv174003368=141, srv323287864=252, srv473114695=293, srv120690885=29, srv2140925045=223, srv792519823=352, srv153684871=94, srv1553479477=98, srv133596310=54, srv400656496=269, srv604155179=317, srv182323719=162, srv785960930=349, srv892588902=371, srv2062600901=214, srv737553837=341, srv1508864857=85, srv1565419759=100, srv1362278869=61, srv501205757=301, srv1421202066=72, srv940692982=382, srv438799507=280, srv415647174=273, srv772706644=347, srv1481494587=79, srv1635367145=110, srv1927295429=187, srv414232327=272, srv828659889=359, srv216888220=228, srv614818476=320, srv784005218=348, srv235134506=231, srv1035721887=3, srv805930204=354, srv1547507531=95, srv1268444143=42, srv1740651662=143, srv1868726387=174, srv374242988=261, srv1113842992=15, srv1521692263=88, srv1743599158=144, srv2027215538=210, srv1810190220=157, srv1754065722=146, srv58432490=316, srv445565525=282, srv492126525=297, srv900748118=374, srv954341876=385, srv183548463=165, srv106897197=7, srv641533009=325, srv1666330878=121, srv1674681084=122, srv826134543=357, srv1789597182=154, srv1270448875=43, srv344280243=254, srv357780792=256, srv360244344=257, srv2075729762=218, srv1901276365=181, srv1192565387=24, srv984858527=389, srv1705200080=133, srv445938785=283, srv877662903=370, srv683375509=336, srv1111705515=14, srv380607943=264, srv970170046=386, srv842200162=361, srv1035479182=2, 
srv237186581=232, srv858717082=363, srv205554152=213, srv1124524348=17, srv1382573455=66, srv1534218884=93, srv389679898=267, srv289466102=246, srv1060769357=5, srv1489442053=80, srv1235682177=35, srv433884114=279, srv622673317=321, srv464684033=289, srv1730892069=138, srv870519530=365, srv1622560572=107, srv1967870795=198, srv92200178=378, srv1663495239=118, srv1816358234=160, srv370813707=260, srv720161268=338, srv829549050=360, srv1986090496=202, srv993475894=392, srv984962223=390, srv1847639159=168, srv1232243248=34, srv1661118120=117, srv678551517=333, srv172310630=136, srv1902693500=182, srv1147935269=19, srv1701428655=131, srv299993528=247, srv454690458=287, srv1358877019=60, srv1876976610=176, srv1749154581=145, srv1850216675=170, srv681166048=335, srv1201467640=28, srv1568047869=101, srv323804765=253, srv1714551353=135, srv1012316939=0, srv1825775388=164, srv500553640=300, srv1863470432=173, srv471971711=292, srv546890520=308, srv1258848160=40, srv2600195=238, srv282045417=245, srv1780240892=152, srv280521839=242, srv558882594=310, srv1942347669=190, srv2000097120=203, srv662168131=328, srv1224084321=31, srv1804914243=156, srv1198550376=27, srv2016403069=209, srv1348390332=58, srv451653572=284, srv1921858919=184, srv1709628907=134, srv850893799=362, srv483949444=295, srv1376426435=64, srv318743715=251, srv387747317=266, srv1560578010=99, srv1283945139=44, srv1505498784=84, srv609163313=318, srv489090739=296, srv426802966=277, srv639253090=324, srv28057007=243, srv1304384655=49, srv1823872934=163, srv1196236938=25, srv1778051760=151, srv1775255631=150, srv1372046861=63, srv108221536=9, srv1172467122=20, srv65681098=327, srv740749590=342, srv16103015=106, srv923037634=379, srv1493723795=83, srv1766357719=149, srv255272493=236, srv280644674=244, srv907921719=376, srv125037295=36, srv1803191992=155, srv2015217064=208, srv45266871=285, srv1643732213=112, srv2066255307=215, srv1226929006=32, srv1895880451=180, srv1260525615=41, srv626258928=322, srv1941561951=189, srv1221999604=30, srv1310761829=52, srv411474732=271, srv426479378=276, srv265051313=240, srv1227844162=33, srv1840962680=167, srv207310924=217, srv669156269=330, srv1549449791=97, srv310849613=249, srv1305267347=50, srv1478570210=78, srv15281397=91, srv453505197=286, srv1307709058=51, srv1346981054=57, srv1643712884=111, srv1605819067=105, srv1983263445=201, srv149034281=82, srv2005424960=205, srv1661109936=116, srv671793901=331, srv264944480=239, srv36779122=259, srv67592821=332, srv1288490002=45, srv894345310=372, srv942744767=384, srv790835399=351, srv1337265973=55, srv1050554083=4, srv233193312=230, srv1401885172=67, srv1849841014=169, srv554458932=309, srv1601150591=104, srv1665219608=120, srv1871555114=175, srv2141373517=224, srv1910283603=183, srv275897422=241, srv2142805438=225, srv974516312=387, srv1945148765=191, srv1513705656=86, srv361100303=258, srv468750413=291, srv1974895663=200, srv1101608755=11, srv129250583=46, srv1675880839=125, srv613337573=319, srv572436388=315, srv430150188=278, srv380153314=263, srv2069721112=216, srv1107814653=12, srv310504057=248, srv721209026=339, srv2118956566=220, srv1892624422=179, srv1811016620=158, srv762003002=344, srv1759627437=148, srv2145379358=226, srv1578119329=102, srv1299128123=47, srv87352217=366, srv1923782703=185, srv560922523=311, srv169066460=127, srv467491126=290, srv897366256=373, srv1954030742=195, srv2008115191=206, srv566021558=314, srv751533717=343, srv1474678717=77, srv2009256004=207, srv2048962504=212, srv1178736390=22, srv532156794=305, srv680409942=334, 
srv1439681344=73, srv1645339883=113, srv565418892=313, srv165588077=114, srv1858441269=172, srv916593175=377, srv200458596=204, srv407436017=270} racks are {rack=0} 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 1 is on host 1 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 2 is on host 2 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 3 is on host 3 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 4 is on host 4 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 5 is on host 5 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 6 is on host 6 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 7 is on host 7 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 8 is on host 8 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 9 is on host 9 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 10 is on host 10 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 11 is on host 11 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 12 is on host 12 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 13 is on host 13 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 14 is on host 14 2024-11-07T18:27:46,708 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 15 is on host 15 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 16 is on host 16 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 17 is on host 17 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 18 is on host 18 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 19 is on host 19 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 20 is on host 20 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 21 is on host 21 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 22 is on host 22 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 23 is on host 23 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 24 is on host 24 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 25 is on host 25 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 26 is on host 26 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 27 is on host 27 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 28 is on host 28 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 29 is on host 29 
2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 30 is on host 30 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 31 is on host 31 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 32 is on host 32 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 33 is on host 33 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 34 is on host 34 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 35 is on host 35 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 36 is on host 36 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 37 is on host 37 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 38 is on host 38 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 39 is on host 39 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 40 is on host 40 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 41 is on host 41 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 42 is on host 42 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 43 is on host 43 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 44 is on host 44 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 45 is on host 45 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 46 is on host 46 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 47 is on host 47 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 48 is on host 48 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 49 is on host 49 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 50 is on host 50 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 51 is on host 51 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 52 is on host 52 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 53 is on host 53 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 54 is on host 54 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 55 is on host 55 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 56 is on host 56 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 57 is on host 57 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 58 is on host 58 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 59 is on host 59 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 60 is on host 60 2024-11-07T18:27:46,709 DEBUG [Time-limited test 
{}] balancer.BalancerClusterState(303): server 61 is on host 61 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 62 is on host 62 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 63 is on host 63 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 64 is on host 64 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 65 is on host 65 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 66 is on host 66 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 67 is on host 67 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 68 is on host 68 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 69 is on host 69 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 70 is on host 70 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 71 is on host 71 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 72 is on host 72 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 73 is on host 73 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 74 is on host 74 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 75 is on host 75 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 76 is on host 76 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 77 is on host 77 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 78 is on host 78 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 79 is on host 79 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 80 is on host 80 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 81 is on host 81 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 82 is on host 82 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 83 is on host 83 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 84 is on host 84 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 85 is on host 85 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 86 is on host 86 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 87 is on host 87 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 88 is on host 88 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 89 is on host 89 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 90 is on host 90 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 91 is on host 91 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 92 
is on host 92 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 93 is on host 93 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 94 is on host 94 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 95 is on host 95 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 96 is on host 96 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 97 is on host 97 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 98 is on host 98 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 99 is on host 99 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 100 is on host 100 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 101 is on host 101 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 102 is on host 102 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 103 is on host 103 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 104 is on host 104 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 105 is on host 105 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 106 is on host 106 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 107 is on host 107 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 108 is on host 108 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 109 is on host 109 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 110 is on host 110 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 111 is on host 111 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 112 is on host 112 2024-11-07T18:27:46,709 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 113 is on host 113 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 114 is on host 114 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 115 is on host 115 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 116 is on host 116 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 117 is on host 117 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 118 is on host 118 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 119 is on host 119 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 120 is on host 120 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 121 is on host 121 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 122 is on host 122 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 123 is 
on host 123 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 124 is on host 124 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 125 is on host 125 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 126 is on host 126 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 127 is on host 127 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 128 is on host 128 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 129 is on host 129 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 130 is on host 130 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 131 is on host 131 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 132 is on host 132 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 133 is on host 133 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 134 is on host 134 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 135 is on host 135 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 136 is on host 136 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 137 is on host 137 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 138 is on host 138 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 139 is on host 139 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 140 is on host 140 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 141 is on host 141 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 142 is on host 142 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 143 is on host 143 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 144 is on host 144 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 145 is on host 145 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 146 is on host 146 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 147 is on host 147 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 148 is on host 148 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 149 is on host 149 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 150 is on host 150 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 151 is on host 151 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 152 is on host 152 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 153 is on host 153 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 154 is on host 154 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 155 is on host 155 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 156 is on host 156 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 157 is on host 157 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 158 is on host 158 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 159 is on host 159 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 160 is on host 160 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 161 is on host 161 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 162 is on host 162 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 163 is on host 163 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 164 is on host 164 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 165 is on host 165 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 166 is on host 166 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 167 is on host 167 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 168 is on host 168 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 169 is on host 169 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 170 is on host 170 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 171 is on host 171 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 172 is on host 172 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 173 is on host 173 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 174 is on host 174 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 175 is on host 175 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 176 is on host 176 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 177 is on host 177 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 178 is on host 178 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 179 is on host 179 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 180 is on host 180 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 181 is on host 181 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 182 is on host 182 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 183 is on host 183 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 184 is on host 184 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 185 is on host 185 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 186 is on host 186 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 187 is on host 187 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 188 is on host 188 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 189 is on host 189 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 190 is on host 190 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 191 is on host 191 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 192 is on host 192 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 193 is on host 193 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 194 is on host 194 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 195 is on host 195 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 196 is on host 196 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 197 is on host 197 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 198 is on host 198 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 199 is on host 199 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 200 is on host 200 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 201 is on host 201 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 202 is on host 202 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 203 is on host 203 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 204 is on host 204 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 205 is on host 205 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 206 is on host 206 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 207 is on host 207 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 208 is on host 208 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 209 is on host 209 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 210 is on host 210 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 211 is on host 211 2024-11-07T18:27:46,710 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 212 is on host 212 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 213 is on host 213 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 214 is on host 214 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 215 is on host 215 2024-11-07T18:27:46,711 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 216 is on host 216 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 217 is on host 217 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 218 is on host 218 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 219 is on host 219 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 220 is on host 220 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 221 is on host 221 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 222 is on host 222 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 223 is on host 223 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 224 is on host 224 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 225 is on host 225 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 226 is on host 226 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 227 is on host 227 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 228 is on host 228 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 229 is on host 229 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 230 is on host 230 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 231 is on host 231 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 232 is on host 232 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 233 is on host 233 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 234 is on host 234 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 235 is on host 235 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 236 is on host 236 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 237 is on host 237 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 238 is on host 238 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 239 is on host 239 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 240 is on host 240 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 241 is on host 241 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 242 is on host 242 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 243 is on host 243 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 244 is on host 244 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 245 is on host 245 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 246 is on host 246 
2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 247 is on host 247 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 248 is on host 248 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 249 is on host 249 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 250 is on host 250 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 251 is on host 251 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 252 is on host 252 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 253 is on host 253 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 254 is on host 254 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 255 is on host 255 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 256 is on host 256 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 257 is on host 257 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 258 is on host 258 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 259 is on host 259 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 260 is on host 260 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 261 is on host 261 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 262 is on host 262 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 263 is on host 263 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 264 is on host 264 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 265 is on host 265 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 266 is on host 266 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 267 is on host 267 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 268 is on host 268 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 269 is on host 269 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 270 is on host 270 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 271 is on host 271 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 272 is on host 272 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 273 is on host 273 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 274 is on host 274 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 275 is on host 275 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 276 is on host 276 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 277 is 
on host 277 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 278 is on host 278 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 279 is on host 279 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 280 is on host 280 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 281 is on host 281 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 282 is on host 282 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 283 is on host 283 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 284 is on host 284 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 285 is on host 285 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 286 is on host 286 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 287 is on host 287 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 288 is on host 288 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 289 is on host 289 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 290 is on host 290 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 291 is on host 291 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 292 is on host 292 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 293 is on host 293 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 294 is on host 294 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 295 is on host 295 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 296 is on host 296 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 297 is on host 297 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 298 is on host 298 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 299 is on host 299 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 300 is on host 300 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 301 is on host 301 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 302 is on host 302 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 303 is on host 303 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 304 is on host 304 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 305 is on host 305 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 306 is on host 306 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 307 is on host 307 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): 
server 308 is on host 308 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 309 is on host 309 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 310 is on host 310 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 311 is on host 311 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 312 is on host 312 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 313 is on host 313 2024-11-07T18:27:46,711 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 314 is on host 314 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 315 is on host 315 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 316 is on host 316 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 317 is on host 317 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 318 is on host 318 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 319 is on host 319 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 320 is on host 320 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 321 is on host 321 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 322 is on host 322 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 323 is on host 323 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 324 is on host 324 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 325 is on host 325 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 326 is on host 326 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 327 is on host 327 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 328 is on host 328 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 329 is on host 329 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 330 is on host 330 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 331 is on host 331 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 332 is on host 332 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 333 is on host 333 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 334 is on host 334 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 335 is on host 335 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 336 is on host 336 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 337 is on host 337 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 338 is on host 338 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] 
balancer.BalancerClusterState(303): server 339 is on host 339 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 340 is on host 340 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 341 is on host 341 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 342 is on host 342 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 343 is on host 343 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 344 is on host 344 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 345 is on host 345 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 346 is on host 346 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 347 is on host 347 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 348 is on host 348 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 349 is on host 349 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 350 is on host 350 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 351 is on host 351 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 352 is on host 352 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 353 is on host 353 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 354 is on host 354 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 355 is on host 355 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 356 is on host 356 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 357 is on host 357 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 358 is on host 358 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 359 is on host 359 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 360 is on host 360 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 361 is on host 361 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 362 is on host 362 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 363 is on host 363 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 364 is on host 364 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 365 is on host 365 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 366 is on host 366 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 367 is on host 367 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 368 is on host 368 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 369 is on host 369 2024-11-07T18:27:46,712 DEBUG 
[Time-limited test {}] balancer.BalancerClusterState(303): server 370 is on host 370 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 371 is on host 371 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 372 is on host 372 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 373 is on host 373 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 374 is on host 374 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 375 is on host 375 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 376 is on host 376 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 377 is on host 377 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 378 is on host 378 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 379 is on host 379 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 380 is on host 380 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 381 is on host 381 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 382 is on host 382 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 383 is on host 383 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 384 is on host 384 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 385 is on host 385 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 386 is on host 386 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 387 is on host 387 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 388 is on host 388 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 389 is on host 389 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 390 is on host 390 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 391 is on host 391 2024-11-07T18:27:46,712 DEBUG [Time-limited test {}] balancer.BalancerClusterState(303): server 392 is on host 392 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 3 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 4 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 5 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 6 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 7 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 8 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 9 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 10 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 11 is on rack 0 2024-11-07T18:27:46,712 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 12 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 13 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 14 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 15 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 16 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 17 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 18 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 19 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 20 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 21 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 22 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 23 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 24 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 25 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 26 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 27 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 28 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 29 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 30 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 31 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 32 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 33 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 34 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 35 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 36 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 37 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 38 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 39 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 40 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 41 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 42 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 43 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 44 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 45 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 46 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 47 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 48 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 49 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 50 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 51 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 52 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 53 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 54 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 55 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 56 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 57 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 58 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 59 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 60 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 61 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 62 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 63 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 64 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 65 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 66 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 67 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 68 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 69 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 70 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 71 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 72 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 73 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 74 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 75 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 76 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 77 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 78 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 79 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 80 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 81 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 82 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 83 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 84 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 85 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 86 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 87 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 88 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 89 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 90 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 91 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 92 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 93 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 94 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 95 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 96 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 97 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 98 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 99 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 100 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 101 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 102 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 103 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test 
{}] balancer.BalancerClusterState(314): server 104 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 105 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 106 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 107 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 108 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 109 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 110 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 111 is on rack 0 2024-11-07T18:27:46,713 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 112 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 113 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 114 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 115 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 116 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 117 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 118 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 119 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 120 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 121 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 122 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 123 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 124 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 125 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 126 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 127 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 128 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 129 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 130 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 131 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 132 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 133 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 134 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 135 is on rack 0 
2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 136 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 137 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 138 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 139 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 140 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 141 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 142 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 143 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 144 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 145 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 146 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 147 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 148 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 149 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 150 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 151 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 152 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 153 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 154 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 155 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 156 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 157 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 158 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 159 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 160 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 161 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 162 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 163 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 164 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 165 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 166 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 167 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 168 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 169 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 170 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 171 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 172 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 173 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 174 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 175 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 176 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 177 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 178 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 179 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 180 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 181 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 182 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 183 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 184 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 185 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 186 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 187 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 188 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 189 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 190 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 191 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 192 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 193 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 194 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 195 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 196 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 197 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 198 is on rack 0 
2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 199 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 200 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 201 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 202 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 203 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 204 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 205 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 206 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 207 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 208 is on rack 0 2024-11-07T18:27:46,714 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 209 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 210 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 211 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 212 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 213 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 214 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 215 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 216 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 217 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 218 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 219 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 220 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 221 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 222 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 223 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 224 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 225 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 226 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 227 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 228 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 229 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 230 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 231 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 232 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 233 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 234 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 235 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 236 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 237 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 238 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 239 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 240 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 241 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 242 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 243 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 244 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 245 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 246 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 247 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 248 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 249 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 250 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 251 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 252 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 253 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 254 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 255 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 256 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 257 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 258 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 259 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 260 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 261 is on rack 0 
2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 262 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 263 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 264 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 265 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 266 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 267 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 268 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 269 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 270 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 271 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 272 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 273 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 274 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 275 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 276 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 277 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 278 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 279 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 280 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 281 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 282 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 283 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 284 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 285 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 286 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 287 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 288 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 289 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 290 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 291 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 292 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 293 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 294 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 295 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 296 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 297 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 298 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 299 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 300 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 301 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 302 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 303 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 304 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 305 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 306 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 307 is on rack 0 2024-11-07T18:27:46,715 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 308 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 309 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 310 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 311 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 312 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 313 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 314 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 315 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 316 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 317 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 318 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 319 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 320 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 321 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 322 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 323 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 324 is on rack 0 
2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 325 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 326 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 327 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 328 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 329 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 330 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 331 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 332 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 333 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 334 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 335 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 336 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 337 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 338 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 339 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 340 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 341 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 342 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 343 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 344 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 345 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 346 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 347 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 348 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 349 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 350 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 351 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 352 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 353 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 354 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 355 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] 
balancer.BalancerClusterState(314): server 356 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 357 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 358 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 359 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 360 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 361 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 362 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 363 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 364 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 365 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 366 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 367 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 368 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 369 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 370 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 371 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 372 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 373 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 374 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 375 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 376 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 377 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 378 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 379 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 380 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 381 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 382 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 383 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 384 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 385 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 386 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 387 is on rack 0 
2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 388 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 389 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 390 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 391 is on rack 0 2024-11-07T18:27:46,716 INFO [Time-limited test {}] balancer.BalancerClusterState(314): server 392 is on rack 0 2024-11-07T18:27:46,716 DEBUG [Time-limited test {}] balancer.BalancerClusterState(319): Number of tables=56, number of hosts=393, number of racks=1 2024-11-07T18:27:46,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(385): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999992); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:46,719 WARN [Time-limited test {}] balancer.StochasticLoadBalancer(548): calculatedMaxSteps:17606400 for loadbalancer's stochastic walk is larger than maxSteps:1000000. Hence load balancing may not work well. Setting parameter "hbase.master.balancer.stochastic.runMaxSteps" to true can overcome this issue.(This config change does not require service restart) 2024-11-07T18:27:46,719 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(556): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.8665511265164638, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.9999999999999992); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=1000000 2024-11-07T18:27:51,617 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(603): Finished computing new moving plan. Computation took 4901 ms to try 1000000 different iterations. Found a solution that moves 55 regions; Going from a computed imbalance of 0.8665511265164638 to a new imbalance of 0.011915077989601387. 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.9821428571428571); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.0); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-07T18:27:51,633 INFO [Time-limited test {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-07T18:27:51,633 INFO [Time-limited test {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 577.0 etc. 2024-11-07T18:27:51,638 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: master.balancer.TestBalancerDecision#testBalancerDecisions Thread=13 (was 12) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=286 (was 286), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=105 (was 84) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1957 (was 2644)
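
The weighted average imbalance values reported above follow directly from the logged cost-function data: each imbalance is weighted by its multiplier and divided by the sum of multipliers (577.0), with the "not needed" functions contributing nothing. Below is a minimal arithmetic sketch (plain Java, not HBase code; the class and method names are invented for illustration) that reproduces both the initial 0.8665511265164638 and the post-plan 0.011915077989601387 from the multipliers and imbalances printed in the log.

/**
 * Sketch only: weighted average imbalance = sum(multiplier_i * imbalance_i) / sum(multiplier_i).
 * Uses just the {multiplier, imbalance} pairs printed by the balancer log above.
 */
public class WeightedImbalanceSketch {

  static double weightedImbalance(double[][] multiplierAndImbalance) {
    double weightedCost = 0.0;
    double sumMultipliers = 0.0;
    for (double[] f : multiplierAndImbalance) {
      weightedCost += f[0] * f[1]; // multiplier * imbalance
      sumMultipliers += f[0];      // multipliers sum to 577.0 as logged
    }
    return weightedCost / sumMultipliers;
  }

  public static void main(String[] args) {
    // Initial state: only RegionCountSkewCostFunction carries a cost.
    double[][] initial = {
      { 500.0, 0.9999999999999992 }, // RegionCountSkewCostFunction
      {   7.0, 0.0 },                // MoveCostFunction
      {  15.0, 0.0 },                // RackLocalityCostFunction
      {  35.0, 0.0 },                // TableSkewCostFunction
      {   5.0, 0.0 },                // ReadRequestCostFunction
      {   5.0, 0.0 },                // WriteRequestCostFunction
      {   5.0, 0.0 },                // MemStoreSizeCostFunction
      {   5.0, 0.0 }                 // StoreFileCostFunction
    };
    // After the computed plan: only MoveCostFunction carries a cost (55 regions moved).
    double[][] afterPlan = {
      { 500.0, 0.0 },
      {   7.0, 0.9821428571428571 },
      {  15.0, 0.0 },
      {  35.0, 0.0 },
      {   5.0, 0.0 },
      {   5.0, 0.0 },
      {   5.0, 0.0 },
      {   5.0, 0.0 }
    };
    System.out.println(weightedImbalance(initial));   // ~0.8665511265164638
    System.out.println(weightedImbalance(afterPlan)); // ~0.011915077989601387
  }
}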
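
The WARN logged at 18:27:46,719 names "hbase.master.balancer.stochastic.runMaxSteps" as the setting that lets the stochastic walk run its full calculatedMaxSteps (17,606,400 here) instead of stopping at maxSteps=1000000. The sketch below shows one way to set that property programmatically, assuming hbase-common and hadoop-common are on the classpath; in a real deployment the same key would normally go into hbase-site.xml on the master, and per the log message the change does not require a service restart.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch only: enable the runMaxSteps behaviour suggested by the balancer WARN above.
 * Only the property named in the log is used; everything else is standard Hadoop/HBase
 * configuration plumbing.
 */
public class RunMaxStepsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", true);
    System.out.println("runMaxSteps = "
        + conf.getBoolean("hbase.master.balancer.stochastic.runMaxSteps", false));
  }
}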